From ba026602a673677735428e64e621cdf95b5cd6d9 Mon Sep 17 00:00:00 2001 From: Philippe Mathieu-Daudé Date: Mon, 11 Sep 2017 17:49:36 -0300 Subject: tcg/ppc: disable atomic write check on ppc32 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes building for ppc64 on ppc32 (changed in 5964fca8a12c): tcg/ppc/tcg-target.inc.c: In function 'tb_target_set_jmp_target': include/qemu/compiler.h:86:30: error: static assertion failed: \ "not expecting: sizeof(*(uint64_t *)jmp_addr) > ATOMIC_REG_SIZE" QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ ^ tcg/ppc/tcg-target.inc.c:1377:9: note: in expansion of macro 'atomic_set' atomic_set((uint64_t *)jmp_addr, pair); ^ Suggested-by: Richard Henderson Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20170911204936.5020-1-f4bug@amsat.org> [rth: Added commentary requested by pmm.] Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.inc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 21d764c..3c93558 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -1374,7 +1374,9 @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, pair = (uint64_t)i2 << 32 | i1; #endif - atomic_set((uint64_t *)jmp_addr, pair); + /* As per the enclosing if, this is ppc64. Avoid the _Static_assert + within atomic_set that would fail to build a ppc32 host. */ + atomic_set__nocheck((uint64_t *)jmp_addr, pair); flush_icache_range(jmp_addr, jmp_addr + 8); } else { intptr_t diff = addr - jmp_addr; -- cgit v1.1 From da1849c1eba50aa372f87c7945d7b230eb2b2fb2 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Mon, 11 Sep 2017 18:33:24 -0300 Subject: accel/tcg: move softmmu_template.h to accel/tcg/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The header is only used by accel/tcg/cputlb.c so we can move it to the accel/tcg/ folder, too. Signed-off-by: Thomas Huth [PMD: reword commit title to match series] Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20170911213328.9701-2-f4bug@amsat.org> Signed-off-by: Richard Henderson --- MAINTAINERS | 1 - accel/tcg/softmmu_template.h | 433 +++++++++++++++++++++++++++++++++++++++++++ softmmu_template.h | 433 ------------------------------------------- 3 files changed, 433 insertions(+), 434 deletions(-) create mode 100644 accel/tcg/softmmu_template.h delete mode 100644 softmmu_template.h diff --git a/MAINTAINERS b/MAINTAINERS index 2c333ab..2127bb2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -86,7 +86,6 @@ M: Richard Henderson S: Maintained F: cpus.c F: exec.c -F: softmmu_template.h F: accel/tcg/ F: include/exec/cpu*.h F: include/exec/exec-all.h diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h new file mode 100644 index 0000000..d756329 --- /dev/null +++ b/accel/tcg/softmmu_template.h @@ -0,0 +1,433 @@ +/* + * Software MMU support + * + * Generate helpers used by TCG for qemu_ld/st ops and code load + * functions. + * + * Included from target op helpers and exec.c. + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#if DATA_SIZE == 8 +#define SUFFIX q +#define LSUFFIX q +#define SDATA_TYPE int64_t +#define DATA_TYPE uint64_t +#elif DATA_SIZE == 4 +#define SUFFIX l +#define LSUFFIX l +#define SDATA_TYPE int32_t +#define DATA_TYPE uint32_t +#elif DATA_SIZE == 2 +#define SUFFIX w +#define LSUFFIX uw +#define SDATA_TYPE int16_t +#define DATA_TYPE uint16_t +#elif DATA_SIZE == 1 +#define SUFFIX b +#define LSUFFIX ub +#define SDATA_TYPE int8_t +#define DATA_TYPE uint8_t +#else +#error unsupported data size +#endif + + +/* For the benefit of TCG generated code, we want to avoid the complication + of ABI-specific return type promotion and always return a value extended + to the register size of the host. This is tcg_target_long, except in the + case of a 32-bit host and 64-bit data, and for that we always have + uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */ +#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8 +# define WORD_TYPE DATA_TYPE +# define USUFFIX SUFFIX +#else +# define WORD_TYPE tcg_target_ulong +# define USUFFIX glue(u, SUFFIX) +# define SSUFFIX glue(s, SUFFIX) +#endif + +#ifdef SOFTMMU_CODE_ACCESS +#define READ_ACCESS_TYPE MMU_INST_FETCH +#define ADDR_READ addr_code +#else +#define READ_ACCESS_TYPE MMU_DATA_LOAD +#define ADDR_READ addr_read +#endif + +#if DATA_SIZE == 8 +# define BSWAP(X) bswap64(X) +#elif DATA_SIZE == 4 +# define BSWAP(X) bswap32(X) +#elif DATA_SIZE == 2 +# define BSWAP(X) bswap16(X) +#else +# define BSWAP(X) (X) +#endif + +#if DATA_SIZE == 1 +# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX) +# define helper_be_ld_name helper_le_ld_name +# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX) +# define helper_be_lds_name helper_le_lds_name +# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX) +# define helper_be_st_name helper_le_st_name +#else +# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX) +# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX) +# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX) +# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX) +# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX) +# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX) +#endif + +#ifndef SOFTMMU_CODE_ACCESS +static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, + size_t mmu_idx, size_t index, + target_ulong addr, + uintptr_t retaddr) +{ + CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index]; + return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, DATA_SIZE); +} +#endif + +WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + unsigned mmu_idx = get_mmuidx(oi); + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + unsigned a_bits = get_alignment_bits(get_memop(oi)); + uintptr_t haddr; + DATA_TYPE res; + + if (addr & ((1 << a_bits) - 1)) { + cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); + } + + /* If the TLB entry is for a 
different page, reload and try again. */ + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { + if (!VICTIM_TLB_HIT(ADDR_READ, addr)) { + tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); + } + tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + } + + /* Handle an IO access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + if ((addr & (DATA_SIZE - 1)) != 0) { + goto do_unaligned_access; + } + + /* ??? Note that the io helpers always read data in the target + byte ordering. We should push the LE/BE request down into io. */ + res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr); + res = TGT_LE(res); + return res; + } + + /* Handle slow unaligned access (it spans two pages or IO). */ + if (DATA_SIZE > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 + >= TARGET_PAGE_SIZE)) { + target_ulong addr1, addr2; + DATA_TYPE res1, res2; + unsigned shift; + do_unaligned_access: + addr1 = addr & ~(DATA_SIZE - 1); + addr2 = addr1 + DATA_SIZE; + res1 = helper_le_ld_name(env, addr1, oi, retaddr); + res2 = helper_le_ld_name(env, addr2, oi, retaddr); + shift = (addr & (DATA_SIZE - 1)) * 8; + + /* Little-endian combine. */ + res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift)); + return res; + } + + haddr = addr + env->tlb_table[mmu_idx][index].addend; +#if DATA_SIZE == 1 + res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr); +#else + res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr); +#endif + return res; +} + +#if DATA_SIZE > 1 +WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + unsigned mmu_idx = get_mmuidx(oi); + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + unsigned a_bits = get_alignment_bits(get_memop(oi)); + uintptr_t haddr; + DATA_TYPE res; + + if (addr & ((1 << a_bits) - 1)) { + cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); + } + + /* If the TLB entry is for a different page, reload and try again. */ + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { + if (!VICTIM_TLB_HIT(ADDR_READ, addr)) { + tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); + } + tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + } + + /* Handle an IO access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + if ((addr & (DATA_SIZE - 1)) != 0) { + goto do_unaligned_access; + } + + /* ??? Note that the io helpers always read data in the target + byte ordering. We should push the LE/BE request down into io. */ + res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr); + res = TGT_BE(res); + return res; + } + + /* Handle slow unaligned access (it spans two pages or IO). */ + if (DATA_SIZE > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 + >= TARGET_PAGE_SIZE)) { + target_ulong addr1, addr2; + DATA_TYPE res1, res2; + unsigned shift; + do_unaligned_access: + addr1 = addr & ~(DATA_SIZE - 1); + addr2 = addr1 + DATA_SIZE; + res1 = helper_be_ld_name(env, addr1, oi, retaddr); + res2 = helper_be_ld_name(env, addr2, oi, retaddr); + shift = (addr & (DATA_SIZE - 1)) * 8; + + /* Big-endian combine. 
*/ + res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift)); + return res; + } + + haddr = addr + env->tlb_table[mmu_idx][index].addend; + res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr); + return res; +} +#endif /* DATA_SIZE > 1 */ + +#ifndef SOFTMMU_CODE_ACCESS + +/* Provide signed versions of the load routines as well. We can of course + avoid this for 64-bit data, or for 32-bit data on 32-bit host. */ +#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS +WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr); +} + +# if DATA_SIZE > 1 +WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr); +} +# endif +#endif + +static inline void glue(io_write, SUFFIX)(CPUArchState *env, + size_t mmu_idx, size_t index, + DATA_TYPE val, + target_ulong addr, + uintptr_t retaddr) +{ + CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index]; + return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, DATA_SIZE); +} + +void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + unsigned mmu_idx = get_mmuidx(oi); + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + unsigned a_bits = get_alignment_bits(get_memop(oi)); + uintptr_t haddr; + + if (addr & ((1 << a_bits) - 1)) { + cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + } + + /* If the TLB entry is for a different page, reload and try again. */ + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { + if (!VICTIM_TLB_HIT(addr_write, addr)) { + tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); + } + tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + } + + /* Handle an IO access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + if ((addr & (DATA_SIZE - 1)) != 0) { + goto do_unaligned_access; + } + + /* ??? Note that the io helpers always read data in the target + byte ordering. We should push the LE/BE request down into io. */ + val = TGT_LE(val); + glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr); + return; + } + + /* Handle slow unaligned access (it spans two pages or IO). */ + if (DATA_SIZE > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 + >= TARGET_PAGE_SIZE)) { + int i, index2; + target_ulong page2, tlb_addr2; + do_unaligned_access: + /* Ensure the second page is in the TLB. Note that the first page + is already guaranteed to be filled, and that the second page + cannot evict the first. */ + page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK; + index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write; + if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) + && !VICTIM_TLB_HIT(addr_write, page2)) { + tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE, + mmu_idx, retaddr); + } + + /* XXX: not efficient, but simple. */ + /* This loop must go in the forward direction to avoid issues + with self-modifying code in Windows 64-bit. */ + for (i = 0; i < DATA_SIZE; ++i) { + /* Little-endian extract. 
*/ + uint8_t val8 = val >> (i * 8); + glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8, + oi, retaddr); + } + return; + } + + haddr = addr + env->tlb_table[mmu_idx][index].addend; +#if DATA_SIZE == 1 + glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val); +#else + glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val); +#endif +} + +#if DATA_SIZE > 1 +void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + unsigned mmu_idx = get_mmuidx(oi); + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + unsigned a_bits = get_alignment_bits(get_memop(oi)); + uintptr_t haddr; + + if (addr & ((1 << a_bits) - 1)) { + cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + } + + /* If the TLB entry is for a different page, reload and try again. */ + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { + if (!VICTIM_TLB_HIT(addr_write, addr)) { + tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); + } + tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + } + + /* Handle an IO access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + if ((addr & (DATA_SIZE - 1)) != 0) { + goto do_unaligned_access; + } + + /* ??? Note that the io helpers always read data in the target + byte ordering. We should push the LE/BE request down into io. */ + val = TGT_BE(val); + glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr); + return; + } + + /* Handle slow unaligned access (it spans two pages or IO). */ + if (DATA_SIZE > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 + >= TARGET_PAGE_SIZE)) { + int i, index2; + target_ulong page2, tlb_addr2; + do_unaligned_access: + /* Ensure the second page is in the TLB. Note that the first page + is already guaranteed to be filled, and that the second page + cannot evict the first. */ + page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK; + index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write; + if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) + && !VICTIM_TLB_HIT(addr_write, page2)) { + tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE, + mmu_idx, retaddr); + } + + /* XXX: not efficient, but simple */ + /* This loop must go in the forward direction to avoid issues + with self-modifying code. */ + for (i = 0; i < DATA_SIZE; ++i) { + /* Big-endian extract. */ + uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8)); + glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8, + oi, retaddr); + } + return; + } + + haddr = addr + env->tlb_table[mmu_idx][index].addend; + glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val); +} +#endif /* DATA_SIZE > 1 */ +#endif /* !defined(SOFTMMU_CODE_ACCESS) */ + +#undef READ_ACCESS_TYPE +#undef DATA_TYPE +#undef SUFFIX +#undef LSUFFIX +#undef DATA_SIZE +#undef ADDR_READ +#undef WORD_TYPE +#undef SDATA_TYPE +#undef USUFFIX +#undef SSUFFIX +#undef BSWAP +#undef helper_le_ld_name +#undef helper_be_ld_name +#undef helper_le_lds_name +#undef helper_be_lds_name +#undef helper_le_st_name +#undef helper_be_st_name diff --git a/softmmu_template.h b/softmmu_template.h deleted file mode 100644 index d756329..0000000 --- a/softmmu_template.h +++ /dev/null @@ -1,433 +0,0 @@ -/* - * Software MMU support - * - * Generate helpers used by TCG for qemu_ld/st ops and code load - * functions. - * - * Included from target op helpers and exec.c. 
- * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#if DATA_SIZE == 8 -#define SUFFIX q -#define LSUFFIX q -#define SDATA_TYPE int64_t -#define DATA_TYPE uint64_t -#elif DATA_SIZE == 4 -#define SUFFIX l -#define LSUFFIX l -#define SDATA_TYPE int32_t -#define DATA_TYPE uint32_t -#elif DATA_SIZE == 2 -#define SUFFIX w -#define LSUFFIX uw -#define SDATA_TYPE int16_t -#define DATA_TYPE uint16_t -#elif DATA_SIZE == 1 -#define SUFFIX b -#define LSUFFIX ub -#define SDATA_TYPE int8_t -#define DATA_TYPE uint8_t -#else -#error unsupported data size -#endif - - -/* For the benefit of TCG generated code, we want to avoid the complication - of ABI-specific return type promotion and always return a value extended - to the register size of the host. This is tcg_target_long, except in the - case of a 32-bit host and 64-bit data, and for that we always have - uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */ -#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8 -# define WORD_TYPE DATA_TYPE -# define USUFFIX SUFFIX -#else -# define WORD_TYPE tcg_target_ulong -# define USUFFIX glue(u, SUFFIX) -# define SSUFFIX glue(s, SUFFIX) -#endif - -#ifdef SOFTMMU_CODE_ACCESS -#define READ_ACCESS_TYPE MMU_INST_FETCH -#define ADDR_READ addr_code -#else -#define READ_ACCESS_TYPE MMU_DATA_LOAD -#define ADDR_READ addr_read -#endif - -#if DATA_SIZE == 8 -# define BSWAP(X) bswap64(X) -#elif DATA_SIZE == 4 -# define BSWAP(X) bswap32(X) -#elif DATA_SIZE == 2 -# define BSWAP(X) bswap16(X) -#else -# define BSWAP(X) (X) -#endif - -#if DATA_SIZE == 1 -# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX) -# define helper_be_ld_name helper_le_ld_name -# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX) -# define helper_be_lds_name helper_le_lds_name -# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX) -# define helper_be_st_name helper_le_st_name -#else -# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX) -# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX) -# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX) -# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX) -# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX) -# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX) -#endif - -#ifndef SOFTMMU_CODE_ACCESS -static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, - size_t mmu_idx, size_t index, - target_ulong addr, - uintptr_t retaddr) -{ - CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index]; - return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, DATA_SIZE); -} -#endif - -WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) -{ - unsigned mmu_idx = get_mmuidx(oi); - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - 
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; - unsigned a_bits = get_alignment_bits(get_memop(oi)); - uintptr_t haddr; - DATA_TYPE res; - - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, - mmu_idx, retaddr); - } - - /* If the TLB entry is for a different page, reload and try again. */ - if ((addr & TARGET_PAGE_MASK) - != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - if (!VICTIM_TLB_HIT(ADDR_READ, addr)) { - tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, - mmu_idx, retaddr); - } - tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; - } - - /* Handle an IO access. */ - if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - if ((addr & (DATA_SIZE - 1)) != 0) { - goto do_unaligned_access; - } - - /* ??? Note that the io helpers always read data in the target - byte ordering. We should push the LE/BE request down into io. */ - res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr); - res = TGT_LE(res); - return res; - } - - /* Handle slow unaligned access (it spans two pages or IO). */ - if (DATA_SIZE > 1 - && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 - >= TARGET_PAGE_SIZE)) { - target_ulong addr1, addr2; - DATA_TYPE res1, res2; - unsigned shift; - do_unaligned_access: - addr1 = addr & ~(DATA_SIZE - 1); - addr2 = addr1 + DATA_SIZE; - res1 = helper_le_ld_name(env, addr1, oi, retaddr); - res2 = helper_le_ld_name(env, addr2, oi, retaddr); - shift = (addr & (DATA_SIZE - 1)) * 8; - - /* Little-endian combine. */ - res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift)); - return res; - } - - haddr = addr + env->tlb_table[mmu_idx][index].addend; -#if DATA_SIZE == 1 - res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr); -#else - res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr); -#endif - return res; -} - -#if DATA_SIZE > 1 -WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) -{ - unsigned mmu_idx = get_mmuidx(oi); - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; - unsigned a_bits = get_alignment_bits(get_memop(oi)); - uintptr_t haddr; - DATA_TYPE res; - - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, - mmu_idx, retaddr); - } - - /* If the TLB entry is for a different page, reload and try again. */ - if ((addr & TARGET_PAGE_MASK) - != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - if (!VICTIM_TLB_HIT(ADDR_READ, addr)) { - tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, - mmu_idx, retaddr); - } - tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; - } - - /* Handle an IO access. */ - if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - if ((addr & (DATA_SIZE - 1)) != 0) { - goto do_unaligned_access; - } - - /* ??? Note that the io helpers always read data in the target - byte ordering. We should push the LE/BE request down into io. */ - res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr); - res = TGT_BE(res); - return res; - } - - /* Handle slow unaligned access (it spans two pages or IO). 
*/ - if (DATA_SIZE > 1 - && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 - >= TARGET_PAGE_SIZE)) { - target_ulong addr1, addr2; - DATA_TYPE res1, res2; - unsigned shift; - do_unaligned_access: - addr1 = addr & ~(DATA_SIZE - 1); - addr2 = addr1 + DATA_SIZE; - res1 = helper_be_ld_name(env, addr1, oi, retaddr); - res2 = helper_be_ld_name(env, addr2, oi, retaddr); - shift = (addr & (DATA_SIZE - 1)) * 8; - - /* Big-endian combine. */ - res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift)); - return res; - } - - haddr = addr + env->tlb_table[mmu_idx][index].addend; - res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr); - return res; -} -#endif /* DATA_SIZE > 1 */ - -#ifndef SOFTMMU_CODE_ACCESS - -/* Provide signed versions of the load routines as well. We can of course - avoid this for 64-bit data, or for 32-bit data on 32-bit host. */ -#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS -WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) -{ - return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr); -} - -# if DATA_SIZE > 1 -WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) -{ - return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr); -} -# endif -#endif - -static inline void glue(io_write, SUFFIX)(CPUArchState *env, - size_t mmu_idx, size_t index, - DATA_TYPE val, - target_ulong addr, - uintptr_t retaddr) -{ - CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index]; - return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, DATA_SIZE); -} - -void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, - TCGMemOpIdx oi, uintptr_t retaddr) -{ - unsigned mmu_idx = get_mmuidx(oi); - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; - unsigned a_bits = get_alignment_bits(get_memop(oi)); - uintptr_t haddr; - - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, - mmu_idx, retaddr); - } - - /* If the TLB entry is for a different page, reload and try again. */ - if ((addr & TARGET_PAGE_MASK) - != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - if (!VICTIM_TLB_HIT(addr_write, addr)) { - tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); - } - tlb_addr = env->tlb_table[mmu_idx][index].addr_write; - } - - /* Handle an IO access. */ - if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - if ((addr & (DATA_SIZE - 1)) != 0) { - goto do_unaligned_access; - } - - /* ??? Note that the io helpers always read data in the target - byte ordering. We should push the LE/BE request down into io. */ - val = TGT_LE(val); - glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr); - return; - } - - /* Handle slow unaligned access (it spans two pages or IO). */ - if (DATA_SIZE > 1 - && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 - >= TARGET_PAGE_SIZE)) { - int i, index2; - target_ulong page2, tlb_addr2; - do_unaligned_access: - /* Ensure the second page is in the TLB. Note that the first page - is already guaranteed to be filled, and that the second page - cannot evict the first. 
*/ - page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK; - index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write; - if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) - && !VICTIM_TLB_HIT(addr_write, page2)) { - tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE, - mmu_idx, retaddr); - } - - /* XXX: not efficient, but simple. */ - /* This loop must go in the forward direction to avoid issues - with self-modifying code in Windows 64-bit. */ - for (i = 0; i < DATA_SIZE; ++i) { - /* Little-endian extract. */ - uint8_t val8 = val >> (i * 8); - glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8, - oi, retaddr); - } - return; - } - - haddr = addr + env->tlb_table[mmu_idx][index].addend; -#if DATA_SIZE == 1 - glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val); -#else - glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val); -#endif -} - -#if DATA_SIZE > 1 -void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, - TCGMemOpIdx oi, uintptr_t retaddr) -{ - unsigned mmu_idx = get_mmuidx(oi); - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; - unsigned a_bits = get_alignment_bits(get_memop(oi)); - uintptr_t haddr; - - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, - mmu_idx, retaddr); - } - - /* If the TLB entry is for a different page, reload and try again. */ - if ((addr & TARGET_PAGE_MASK) - != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - if (!VICTIM_TLB_HIT(addr_write, addr)) { - tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); - } - tlb_addr = env->tlb_table[mmu_idx][index].addr_write; - } - - /* Handle an IO access. */ - if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - if ((addr & (DATA_SIZE - 1)) != 0) { - goto do_unaligned_access; - } - - /* ??? Note that the io helpers always read data in the target - byte ordering. We should push the LE/BE request down into io. */ - val = TGT_BE(val); - glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr); - return; - } - - /* Handle slow unaligned access (it spans two pages or IO). */ - if (DATA_SIZE > 1 - && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 - >= TARGET_PAGE_SIZE)) { - int i, index2; - target_ulong page2, tlb_addr2; - do_unaligned_access: - /* Ensure the second page is in the TLB. Note that the first page - is already guaranteed to be filled, and that the second page - cannot evict the first. */ - page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK; - index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write; - if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) - && !VICTIM_TLB_HIT(addr_write, page2)) { - tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE, - mmu_idx, retaddr); - } - - /* XXX: not efficient, but simple */ - /* This loop must go in the forward direction to avoid issues - with self-modifying code. */ - for (i = 0; i < DATA_SIZE; ++i) { - /* Big-endian extract. 
*/ - uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8)); - glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8, - oi, retaddr); - } - return; - } - - haddr = addr + env->tlb_table[mmu_idx][index].addend; - glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val); -} -#endif /* DATA_SIZE > 1 */ -#endif /* !defined(SOFTMMU_CODE_ACCESS) */ - -#undef READ_ACCESS_TYPE -#undef DATA_TYPE -#undef SUFFIX -#undef LSUFFIX -#undef DATA_SIZE -#undef ADDR_READ -#undef WORD_TYPE -#undef SDATA_TYPE -#undef USUFFIX -#undef SSUFFIX -#undef BSWAP -#undef helper_le_ld_name -#undef helper_be_ld_name -#undef helper_le_lds_name -#undef helper_be_lds_name -#undef helper_le_st_name -#undef helper_be_st_name -- cgit v1.1 From 58410666688690ae2bd1acb2b31c0ba8661b83d1 Mon Sep 17 00:00:00 2001 From: Philippe Mathieu-Daudé Date: Mon, 11 Sep 2017 18:33:25 -0300 Subject: accel/tcg: move user-exec to accel/tcg/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Suggested-by: Paolo Bonzini Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20170911213328.9701-3-f4bug@amsat.org> Signed-off-by: Richard Henderson --- MAINTAINERS | 3 +- Makefile.target | 4 +- accel/tcg/Makefile.objs | 3 + accel/tcg/user-exec-stub.c | 34 +++ accel/tcg/user-exec.c | 575 +++++++++++++++++++++++++++++++++++++++++++++ user-exec-stub.c | 34 --- user-exec.c | 575 --------------------------------------------- 7 files changed, 615 insertions(+), 613 deletions(-) create mode 100644 accel/tcg/user-exec-stub.c create mode 100644 accel/tcg/user-exec.c delete mode 100644 user-exec-stub.c delete mode 100644 user-exec.c diff --git a/MAINTAINERS b/MAINTAINERS index 2127bb2..4bd1797 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1603,8 +1603,7 @@ Overall M: Riku Voipio S: Maintained F: thunk.c -F: user-exec.c -F: user-exec-stub.c +F: accel/tcg/user-exec*.c BSD user S: Orphan diff --git a/Makefile.target b/Makefile.target index 7f42c45..520305b 100644 --- a/Makefile.target +++ b/Makefile.target @@ -119,7 +119,7 @@ QEMU_CFLAGS+=-I$(SRC_PATH)/linux-user/$(TARGET_ABI_DIR) \ -I$(SRC_PATH)/linux-user obj-y += linux-user/ -obj-y += gdbstub.o thunk.o user-exec.o user-exec-stub.o +obj-y += gdbstub.o thunk.o endif #CONFIG_LINUX_USER @@ -132,7 +132,7 @@ QEMU_CFLAGS+=-I$(SRC_PATH)/bsd-user -I$(SRC_PATH)/bsd-user/$(TARGET_ABI_DIR) \ -I$(SRC_PATH)/bsd-user/$(HOST_VARIANT_DIR) obj-y += bsd-user/ -obj-y += gdbstub.o user-exec.o user-exec-stub.o +obj-y += gdbstub.o endif #CONFIG_BSD_USER diff --git a/accel/tcg/Makefile.objs b/accel/tcg/Makefile.objs index 22642e6..f2422d0 100644 --- a/accel/tcg/Makefile.objs +++ b/accel/tcg/Makefile.objs @@ -2,3 +2,6 @@ obj-$(CONFIG_SOFTMMU) += tcg-all.o obj-$(CONFIG_SOFTMMU) += cputlb.o obj-y += cpu-exec.o cpu-exec-common.o translate-all.o obj-y += translator.o + +obj-$(CONFIG_USER_ONLY) += user-exec.o +obj-$(call lnot,$(CONFIG_SOFTMMU)) += user-exec-stub.o diff --git a/accel/tcg/user-exec-stub.c b/accel/tcg/user-exec-stub.c new file mode 100644 index 0000000..dbcf1ad --- /dev/null +++ b/accel/tcg/user-exec-stub.c @@ -0,0 +1,34 @@ +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qom/cpu.h" +#include "sysemu/replay.h" + +void cpu_resume(CPUState *cpu) +{ +} + +void qemu_init_vcpu(CPUState *cpu) +{ +} + +/* User mode emulation does not support record/replay yet. 
*/ + +bool replay_exception(void) +{ + return true; +} + +bool replay_has_exception(void) +{ + return false; +} + +bool replay_interrupt(void) +{ + return true; +} + +bool replay_has_interrupt(void) +{ + return false; +} diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c new file mode 100644 index 0000000..2a975ea --- /dev/null +++ b/accel/tcg/user-exec.c @@ -0,0 +1,575 @@ +/* + * User emulator execution + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "disas/disas.h" +#include "exec/exec-all.h" +#include "tcg.h" +#include "qemu/bitops.h" +#include "exec/cpu_ldst.h" +#include "translate-all.h" + +#undef EAX +#undef ECX +#undef EDX +#undef EBX +#undef ESP +#undef EBP +#undef ESI +#undef EDI +#undef EIP +#ifdef __linux__ +#include +#endif + +//#define DEBUG_SIGNAL + +/* exit the current TB from a signal handler. The host registers are + restored in a state compatible with the CPU emulator + */ +static void cpu_exit_tb_from_sighandler(CPUState *cpu, sigset_t *old_set) +{ + /* XXX: use siglongjmp ? */ + sigprocmask(SIG_SETMASK, old_set, NULL); + cpu_loop_exit_noexc(cpu); +} + +/* 'pc' is the host PC at which the exception was raised. 'address' is + the effective address of the memory exception. 'is_write' is 1 if a + write caused the exception and otherwise 0'. 'old_set' is the + signal set which should be restored */ +static inline int handle_cpu_signal(uintptr_t pc, unsigned long address, + int is_write, sigset_t *old_set) +{ + CPUState *cpu = current_cpu; + CPUClass *cc; + int ret; + + /* For synchronous signals we expect to be coming from the vCPU + * thread (so current_cpu should be valid) and either from running + * code or during translation which can fault as we cross pages. + * + * If neither is true then something has gone wrong and we should + * abort rather than try and restart the vCPU execution. + */ + if (!cpu || !cpu->running) { + printf("qemu:%s received signal outside vCPU context @ pc=0x%" + PRIxPTR "\n", __func__, pc); + abort(); + } + +#if defined(DEBUG_SIGNAL) + printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", + pc, address, is_write, *(unsigned long *)old_set); +#endif + /* XXX: locking issue */ + if (is_write && h2g_valid(address)) { + switch (page_unprotect(h2g(address), pc)) { + case 0: + /* Fault not caused by a page marked unwritable to protect + * cached translations, must be the guest binary's problem + */ + break; + case 1: + /* Fault caused by protection of cached translation; TBs + * invalidated, so resume execution + */ + return 1; + case 2: + /* Fault caused by protection of cached translation, and the + * currently executing TB was modified and must be exited + * immediately. 
+ */ + cpu_exit_tb_from_sighandler(cpu, old_set); + g_assert_not_reached(); + default: + g_assert_not_reached(); + } + } + + /* Convert forcefully to guest address space, invalid addresses + are still valid segv ones */ + address = h2g_nocheck(address); + + cc = CPU_GET_CLASS(cpu); + /* see if it is an MMU fault */ + g_assert(cc->handle_mmu_fault); + ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX); + if (ret < 0) { + return 0; /* not an MMU fault */ + } + if (ret == 0) { + return 1; /* the MMU fault was handled without causing real CPU fault */ + } + + /* Now we have a real cpu fault. Since this is the exact location of + * the exception, we must undo the adjustment done by cpu_restore_state + * for handling call return addresses. */ + cpu_restore_state(cpu, pc + GETPC_ADJ); + + sigprocmask(SIG_SETMASK, old_set, NULL); + cpu_loop_exit(cpu); + + /* never comes here */ + return 1; +} + +#if defined(__i386__) + +#if defined(__NetBSD__) +#include + +#define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP]) +#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) +#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) +#define MASK_sig(context) ((context)->uc_sigmask) +#elif defined(__FreeBSD__) || defined(__DragonFly__) +#include + +#define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_eip)) +#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) +#define ERROR_sig(context) ((context)->uc_mcontext.mc_err) +#define MASK_sig(context) ((context)->uc_sigmask) +#elif defined(__OpenBSD__) +#define EIP_sig(context) ((context)->sc_eip) +#define TRAP_sig(context) ((context)->sc_trapno) +#define ERROR_sig(context) ((context)->sc_err) +#define MASK_sig(context) ((context)->sc_mask) +#else +#define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP]) +#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) +#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) +#define MASK_sig(context) ((context)->uc_sigmask) +#endif + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; +#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) + ucontext_t *uc = puc; +#elif defined(__OpenBSD__) + struct sigcontext *uc = puc; +#else + ucontext_t *uc = puc; +#endif + unsigned long pc; + int trapno; + +#ifndef REG_EIP +/* for glibc 2.1 */ +#define REG_EIP EIP +#define REG_ERR ERR +#define REG_TRAPNO TRAPNO +#endif + pc = EIP_sig(uc); + trapno = TRAP_sig(uc); + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + trapno == 0xe ? 
+ (ERROR_sig(uc) >> 1) & 1 : 0, + &MASK_sig(uc)); +} + +#elif defined(__x86_64__) + +#ifdef __NetBSD__ +#define PC_sig(context) _UC_MACHINE_PC(context) +#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) +#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) +#define MASK_sig(context) ((context)->uc_sigmask) +#elif defined(__OpenBSD__) +#define PC_sig(context) ((context)->sc_rip) +#define TRAP_sig(context) ((context)->sc_trapno) +#define ERROR_sig(context) ((context)->sc_err) +#define MASK_sig(context) ((context)->sc_mask) +#elif defined(__FreeBSD__) || defined(__DragonFly__) +#include + +#define PC_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_rip)) +#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) +#define ERROR_sig(context) ((context)->uc_mcontext.mc_err) +#define MASK_sig(context) ((context)->uc_sigmask) +#else +#define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP]) +#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) +#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) +#define MASK_sig(context) ((context)->uc_sigmask) +#endif + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + unsigned long pc; +#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) + ucontext_t *uc = puc; +#elif defined(__OpenBSD__) + struct sigcontext *uc = puc; +#else + ucontext_t *uc = puc; +#endif + + pc = PC_sig(uc); + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + TRAP_sig(uc) == 0xe ? + (ERROR_sig(uc) >> 1) & 1 : 0, + &MASK_sig(uc)); +} + +#elif defined(_ARCH_PPC) + +/*********************************************************************** + * signal context platform-specific definitions + * From Wine + */ +#ifdef linux +/* All Registers access - only for local access */ +#define REG_sig(reg_name, context) \ + ((context)->uc_mcontext.regs->reg_name) +/* Gpr Registers access */ +#define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context) +/* Program counter */ +#define IAR_sig(context) REG_sig(nip, context) +/* Machine State Register (Supervisor) */ +#define MSR_sig(context) REG_sig(msr, context) +/* Count register */ +#define CTR_sig(context) REG_sig(ctr, context) +/* User's integer exception register */ +#define XER_sig(context) REG_sig(xer, context) +/* Link register */ +#define LR_sig(context) REG_sig(link, context) +/* Condition register */ +#define CR_sig(context) REG_sig(ccr, context) + +/* Float Registers access */ +#define FLOAT_sig(reg_num, context) \ + (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num]) +#define FPSCR_sig(context) \ + (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4))) +/* Exception Registers access */ +#define DAR_sig(context) REG_sig(dar, context) +#define DSISR_sig(context) REG_sig(dsisr, context) +#define TRAP_sig(context) REG_sig(trap, context) +#endif /* linux */ + +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#include +#define IAR_sig(context) ((context)->uc_mcontext.mc_srr0) +#define MSR_sig(context) ((context)->uc_mcontext.mc_srr1) +#define CTR_sig(context) ((context)->uc_mcontext.mc_ctr) +#define XER_sig(context) ((context)->uc_mcontext.mc_xer) +#define LR_sig(context) ((context)->uc_mcontext.mc_lr) +#define CR_sig(context) ((context)->uc_mcontext.mc_cr) +/* Exception Registers access */ +#define DAR_sig(context) ((context)->uc_mcontext.mc_dar) +#define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr) +#define TRAP_sig(context) 
((context)->uc_mcontext.mc_exc) +#endif /* __FreeBSD__|| __FreeBSD_kernel__ */ + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) + ucontext_t *uc = puc; +#else + ucontext_t *uc = puc; +#endif + unsigned long pc; + int is_write; + + pc = IAR_sig(uc); + is_write = 0; +#if 0 + /* ppc 4xx case */ + if (DSISR_sig(uc) & 0x00800000) { + is_write = 1; + } +#else + if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) { + is_write = 1; + } +#endif + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, &uc->uc_sigmask); +} + +#elif defined(__alpha__) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + ucontext_t *uc = puc; + uint32_t *pc = uc->uc_mcontext.sc_pc; + uint32_t insn = *pc; + int is_write = 0; + + /* XXX: need kernel patch to get write flag faster */ + switch (insn >> 26) { + case 0x0d: /* stw */ + case 0x0e: /* stb */ + case 0x0f: /* stq_u */ + case 0x24: /* stf */ + case 0x25: /* stg */ + case 0x26: /* sts */ + case 0x27: /* stt */ + case 0x2c: /* stl */ + case 0x2d: /* stq */ + case 0x2e: /* stl_c */ + case 0x2f: /* stq_c */ + is_write = 1; + } + + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, &uc->uc_sigmask); +} +#elif defined(__sparc__) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + int is_write; + uint32_t insn; +#if !defined(__arch64__) || defined(CONFIG_SOLARIS) + uint32_t *regs = (uint32_t *)(info + 1); + void *sigmask = (regs + 20); + /* XXX: is there a standard glibc define ? */ + unsigned long pc = regs[1]; +#else +#ifdef __linux__ + struct sigcontext *sc = puc; + unsigned long pc = sc->sigc_regs.tpc; + void *sigmask = (void *)sc->sigc_mask; +#elif defined(__OpenBSD__) + struct sigcontext *uc = puc; + unsigned long pc = uc->sc_pc; + void *sigmask = (void *)(long)uc->sc_mask; +#elif defined(__NetBSD__) + ucontext_t *uc = puc; + unsigned long pc = _UC_MACHINE_PC(uc); + void *sigmask = (void *)&uc->uc_sigmask; +#endif +#endif + + /* XXX: need kernel patch to get write flag faster */ + is_write = 0; + insn = *(uint32_t *)pc; + if ((insn >> 30) == 3) { + switch ((insn >> 19) & 0x3f) { + case 0x05: /* stb */ + case 0x15: /* stba */ + case 0x06: /* sth */ + case 0x16: /* stha */ + case 0x04: /* st */ + case 0x14: /* sta */ + case 0x07: /* std */ + case 0x17: /* stda */ + case 0x0e: /* stx */ + case 0x1e: /* stxa */ + case 0x24: /* stf */ + case 0x34: /* stfa */ + case 0x27: /* stdf */ + case 0x37: /* stdfa */ + case 0x26: /* stqf */ + case 0x36: /* stqfa */ + case 0x25: /* stfsr */ + case 0x3c: /* casa */ + case 0x3e: /* casxa */ + is_write = 1; + break; + } + } + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, sigmask); +} + +#elif defined(__arm__) + +#if defined(__NetBSD__) +#include +#endif + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; +#if defined(__NetBSD__) + ucontext_t *uc = puc; +#else + ucontext_t *uc = puc; +#endif + unsigned long pc; + int is_write; + +#if defined(__NetBSD__) + pc = uc->uc_mcontext.__gregs[_REG_R15]; +#elif defined(__GLIBC__) && (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3)) + pc = uc->uc_mcontext.gregs[R15]; +#else + pc = uc->uc_mcontext.arm_pc; +#endif + + /* error_code is the FSR value, in which bit 11 is WnR (assuming a v6 or + * later processor; on v5 we will always report this as a read). 
+ */ + is_write = extract32(uc->uc_mcontext.error_code, 11, 1); + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, + &uc->uc_sigmask); +} + +#elif defined(__aarch64__) + +int cpu_signal_handler(int host_signum, void *pinfo, void *puc) +{ + siginfo_t *info = pinfo; + ucontext_t *uc = puc; + uintptr_t pc = uc->uc_mcontext.pc; + uint32_t insn = *(uint32_t *)pc; + bool is_write; + + /* XXX: need kernel patch to get write flag faster. */ + is_write = ( (insn & 0xbfff0000) == 0x0c000000 /* C3.3.1 */ + || (insn & 0xbfe00000) == 0x0c800000 /* C3.3.2 */ + || (insn & 0xbfdf0000) == 0x0d000000 /* C3.3.3 */ + || (insn & 0xbfc00000) == 0x0d800000 /* C3.3.4 */ + || (insn & 0x3f400000) == 0x08000000 /* C3.3.6 */ + || (insn & 0x3bc00000) == 0x39000000 /* C3.3.13 */ + || (insn & 0x3fc00000) == 0x3d800000 /* ... 128bit */ + /* Ingore bits 10, 11 & 21, controlling indexing. */ + || (insn & 0x3bc00000) == 0x38000000 /* C3.3.8-12 */ + || (insn & 0x3fe00000) == 0x3c800000 /* ... 128bit */ + /* Ignore bits 23 & 24, controlling indexing. */ + || (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */ + + return handle_cpu_signal(pc, (uintptr_t)info->si_addr, + is_write, &uc->uc_sigmask); +} + +#elif defined(__ia64) + +#ifndef __ISR_VALID + /* This ought to be in ... */ +# define __ISR_VALID 1 +#endif + +int cpu_signal_handler(int host_signum, void *pinfo, void *puc) +{ + siginfo_t *info = pinfo; + ucontext_t *uc = puc; + unsigned long ip; + int is_write = 0; + + ip = uc->uc_mcontext.sc_ip; + switch (host_signum) { + case SIGILL: + case SIGFPE: + case SIGSEGV: + case SIGBUS: + case SIGTRAP: + if (info->si_code && (info->si_segvflags & __ISR_VALID)) { + /* ISR.W (write-access) is bit 33: */ + is_write = (info->si_isr >> 33) & 1; + } + break; + + default: + break; + } + return handle_cpu_signal(ip, (unsigned long)info->si_addr, + is_write, + (sigset_t *)&uc->uc_sigmask); +} + +#elif defined(__s390__) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + ucontext_t *uc = puc; + unsigned long pc; + uint16_t *pinsn; + int is_write = 0; + + pc = uc->uc_mcontext.psw.addr; + + /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead + of the normal 2 arguments. The 3rd argument contains the "int_code" + from the hardware which does in fact contain the is_write value. + The rt signal handler, as far as I can tell, does not give this value + at all. Not that we could get to it from here even if it were. */ + /* ??? This is not even close to complete, since it ignores all + of the read-modify-write instructions. 
*/ + pinsn = (uint16_t *)pc; + switch (pinsn[0] >> 8) { + case 0x50: /* ST */ + case 0x42: /* STC */ + case 0x40: /* STH */ + is_write = 1; + break; + case 0xc4: /* RIL format insns */ + switch (pinsn[0] & 0xf) { + case 0xf: /* STRL */ + case 0xb: /* STGRL */ + case 0x7: /* STHRL */ + is_write = 1; + } + break; + case 0xe3: /* RXY format insns */ + switch (pinsn[2] & 0xff) { + case 0x50: /* STY */ + case 0x24: /* STG */ + case 0x72: /* STCY */ + case 0x70: /* STHY */ + case 0x8e: /* STPQ */ + case 0x3f: /* STRVH */ + case 0x3e: /* STRV */ + case 0x2f: /* STRVG */ + is_write = 1; + } + break; + } + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, &uc->uc_sigmask); +} + +#elif defined(__mips__) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + ucontext_t *uc = puc; + greg_t pc = uc->uc_mcontext.pc; + int is_write; + + /* XXX: compute is_write */ + is_write = 0; + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, &uc->uc_sigmask); +} + +#else + +#error host CPU specific signal handler needed + +#endif diff --git a/user-exec-stub.c b/user-exec-stub.c deleted file mode 100644 index dbcf1ad..0000000 --- a/user-exec-stub.c +++ /dev/null @@ -1,34 +0,0 @@ -#include "qemu/osdep.h" -#include "qemu-common.h" -#include "qom/cpu.h" -#include "sysemu/replay.h" - -void cpu_resume(CPUState *cpu) -{ -} - -void qemu_init_vcpu(CPUState *cpu) -{ -} - -/* User mode emulation does not support record/replay yet. */ - -bool replay_exception(void) -{ - return true; -} - -bool replay_has_exception(void) -{ - return false; -} - -bool replay_interrupt(void) -{ - return true; -} - -bool replay_has_interrupt(void) -{ - return false; -} diff --git a/user-exec.c b/user-exec.c deleted file mode 100644 index 2a975ea..0000000 --- a/user-exec.c +++ /dev/null @@ -1,575 +0,0 @@ -/* - * User emulator execution - * - * Copyright (c) 2003-2005 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#include "qemu/osdep.h" -#include "cpu.h" -#include "disas/disas.h" -#include "exec/exec-all.h" -#include "tcg.h" -#include "qemu/bitops.h" -#include "exec/cpu_ldst.h" -#include "translate-all.h" - -#undef EAX -#undef ECX -#undef EDX -#undef EBX -#undef ESP -#undef EBP -#undef ESI -#undef EDI -#undef EIP -#ifdef __linux__ -#include -#endif - -//#define DEBUG_SIGNAL - -/* exit the current TB from a signal handler. The host registers are - restored in a state compatible with the CPU emulator - */ -static void cpu_exit_tb_from_sighandler(CPUState *cpu, sigset_t *old_set) -{ - /* XXX: use siglongjmp ? */ - sigprocmask(SIG_SETMASK, old_set, NULL); - cpu_loop_exit_noexc(cpu); -} - -/* 'pc' is the host PC at which the exception was raised. 'address' is - the effective address of the memory exception. 'is_write' is 1 if a - write caused the exception and otherwise 0'. 
'old_set' is the - signal set which should be restored */ -static inline int handle_cpu_signal(uintptr_t pc, unsigned long address, - int is_write, sigset_t *old_set) -{ - CPUState *cpu = current_cpu; - CPUClass *cc; - int ret; - - /* For synchronous signals we expect to be coming from the vCPU - * thread (so current_cpu should be valid) and either from running - * code or during translation which can fault as we cross pages. - * - * If neither is true then something has gone wrong and we should - * abort rather than try and restart the vCPU execution. - */ - if (!cpu || !cpu->running) { - printf("qemu:%s received signal outside vCPU context @ pc=0x%" - PRIxPTR "\n", __func__, pc); - abort(); - } - -#if defined(DEBUG_SIGNAL) - printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", - pc, address, is_write, *(unsigned long *)old_set); -#endif - /* XXX: locking issue */ - if (is_write && h2g_valid(address)) { - switch (page_unprotect(h2g(address), pc)) { - case 0: - /* Fault not caused by a page marked unwritable to protect - * cached translations, must be the guest binary's problem - */ - break; - case 1: - /* Fault caused by protection of cached translation; TBs - * invalidated, so resume execution - */ - return 1; - case 2: - /* Fault caused by protection of cached translation, and the - * currently executing TB was modified and must be exited - * immediately. - */ - cpu_exit_tb_from_sighandler(cpu, old_set); - g_assert_not_reached(); - default: - g_assert_not_reached(); - } - } - - /* Convert forcefully to guest address space, invalid addresses - are still valid segv ones */ - address = h2g_nocheck(address); - - cc = CPU_GET_CLASS(cpu); - /* see if it is an MMU fault */ - g_assert(cc->handle_mmu_fault); - ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX); - if (ret < 0) { - return 0; /* not an MMU fault */ - } - if (ret == 0) { - return 1; /* the MMU fault was handled without causing real CPU fault */ - } - - /* Now we have a real cpu fault. Since this is the exact location of - * the exception, we must undo the adjustment done by cpu_restore_state - * for handling call return addresses. 
*/ - cpu_restore_state(cpu, pc + GETPC_ADJ); - - sigprocmask(SIG_SETMASK, old_set, NULL); - cpu_loop_exit(cpu); - - /* never comes here */ - return 1; -} - -#if defined(__i386__) - -#if defined(__NetBSD__) -#include - -#define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP]) -#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#elif defined(__FreeBSD__) || defined(__DragonFly__) -#include - -#define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_eip)) -#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) -#define ERROR_sig(context) ((context)->uc_mcontext.mc_err) -#define MASK_sig(context) ((context)->uc_sigmask) -#elif defined(__OpenBSD__) -#define EIP_sig(context) ((context)->sc_eip) -#define TRAP_sig(context) ((context)->sc_trapno) -#define ERROR_sig(context) ((context)->sc_err) -#define MASK_sig(context) ((context)->sc_mask) -#else -#define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP]) -#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; -#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) - ucontext_t *uc = puc; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; -#else - ucontext_t *uc = puc; -#endif - unsigned long pc; - int trapno; - -#ifndef REG_EIP -/* for glibc 2.1 */ -#define REG_EIP EIP -#define REG_ERR ERR -#define REG_TRAPNO TRAPNO -#endif - pc = EIP_sig(uc); - trapno = TRAP_sig(uc); - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - trapno == 0xe ? - (ERROR_sig(uc) >> 1) & 1 : 0, - &MASK_sig(uc)); -} - -#elif defined(__x86_64__) - -#ifdef __NetBSD__ -#define PC_sig(context) _UC_MACHINE_PC(context) -#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#elif defined(__OpenBSD__) -#define PC_sig(context) ((context)->sc_rip) -#define TRAP_sig(context) ((context)->sc_trapno) -#define ERROR_sig(context) ((context)->sc_err) -#define MASK_sig(context) ((context)->sc_mask) -#elif defined(__FreeBSD__) || defined(__DragonFly__) -#include - -#define PC_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_rip)) -#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) -#define ERROR_sig(context) ((context)->uc_mcontext.mc_err) -#define MASK_sig(context) ((context)->uc_sigmask) -#else -#define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP]) -#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - unsigned long pc; -#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) - ucontext_t *uc = puc; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; -#else - ucontext_t *uc = puc; -#endif - - pc = PC_sig(uc); - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - TRAP_sig(uc) == 0xe ? 
- (ERROR_sig(uc) >> 1) & 1 : 0, - &MASK_sig(uc)); -} - -#elif defined(_ARCH_PPC) - -/*********************************************************************** - * signal context platform-specific definitions - * From Wine - */ -#ifdef linux -/* All Registers access - only for local access */ -#define REG_sig(reg_name, context) \ - ((context)->uc_mcontext.regs->reg_name) -/* Gpr Registers access */ -#define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context) -/* Program counter */ -#define IAR_sig(context) REG_sig(nip, context) -/* Machine State Register (Supervisor) */ -#define MSR_sig(context) REG_sig(msr, context) -/* Count register */ -#define CTR_sig(context) REG_sig(ctr, context) -/* User's integer exception register */ -#define XER_sig(context) REG_sig(xer, context) -/* Link register */ -#define LR_sig(context) REG_sig(link, context) -/* Condition register */ -#define CR_sig(context) REG_sig(ccr, context) - -/* Float Registers access */ -#define FLOAT_sig(reg_num, context) \ - (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num]) -#define FPSCR_sig(context) \ - (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4))) -/* Exception Registers access */ -#define DAR_sig(context) REG_sig(dar, context) -#define DSISR_sig(context) REG_sig(dsisr, context) -#define TRAP_sig(context) REG_sig(trap, context) -#endif /* linux */ - -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) -#include <ucontext.h> -#define IAR_sig(context) ((context)->uc_mcontext.mc_srr0) -#define MSR_sig(context) ((context)->uc_mcontext.mc_srr1) -#define CTR_sig(context) ((context)->uc_mcontext.mc_ctr) -#define XER_sig(context) ((context)->uc_mcontext.mc_xer) -#define LR_sig(context) ((context)->uc_mcontext.mc_lr) -#define CR_sig(context) ((context)->uc_mcontext.mc_cr) -/* Exception Registers access */ -#define DAR_sig(context) ((context)->uc_mcontext.mc_dar) -#define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr) -#define TRAP_sig(context) ((context)->uc_mcontext.mc_exc) -#endif /* __FreeBSD__|| __FreeBSD_kernel__ */ - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) - ucontext_t *uc = puc; -#else - ucontext_t *uc = puc; -#endif - unsigned long pc; - int is_write; - - pc = IAR_sig(uc); - is_write = 0; -#if 0 - /* ppc 4xx case */ - if (DSISR_sig(uc) & 0x00800000) { - is_write = 1; - } -#else - if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) { - is_write = 1; - } -#endif - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, &uc->uc_sigmask); -} - -#elif defined(__alpha__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - uint32_t *pc = uc->uc_mcontext.sc_pc; - uint32_t insn = *pc; - int is_write = 0; - - /* XXX: need kernel patch to get write flag faster */ - switch (insn >> 26) { - case 0x0d: /* stw */ - case 0x0e: /* stb */ - case 0x0f: /* stq_u */ - case 0x24: /* stf */ - case 0x25: /* stg */ - case 0x26: /* sts */ - case 0x27: /* stt */ - case 0x2c: /* stl */ - case 0x2d: /* stq */ - case 0x2e: /* stl_c */ - case 0x2f: /* stq_c */ - is_write = 1; - } - - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, &uc->uc_sigmask); -} -#elif defined(__sparc__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - int is_write; - uint32_t insn; -#if !defined(__arch64__) || defined(CONFIG_SOLARIS) - uint32_t *regs = (uint32_t
*)(info + 1); - void *sigmask = (regs + 20); - /* XXX: is there a standard glibc define ? */ - unsigned long pc = regs[1]; -#else -#ifdef __linux__ - struct sigcontext *sc = puc; - unsigned long pc = sc->sigc_regs.tpc; - void *sigmask = (void *)sc->sigc_mask; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; - unsigned long pc = uc->sc_pc; - void *sigmask = (void *)(long)uc->sc_mask; -#elif defined(__NetBSD__) - ucontext_t *uc = puc; - unsigned long pc = _UC_MACHINE_PC(uc); - void *sigmask = (void *)&uc->uc_sigmask; -#endif -#endif - - /* XXX: need kernel patch to get write flag faster */ - is_write = 0; - insn = *(uint32_t *)pc; - if ((insn >> 30) == 3) { - switch ((insn >> 19) & 0x3f) { - case 0x05: /* stb */ - case 0x15: /* stba */ - case 0x06: /* sth */ - case 0x16: /* stha */ - case 0x04: /* st */ - case 0x14: /* sta */ - case 0x07: /* std */ - case 0x17: /* stda */ - case 0x0e: /* stx */ - case 0x1e: /* stxa */ - case 0x24: /* stf */ - case 0x34: /* stfa */ - case 0x27: /* stdf */ - case 0x37: /* stdfa */ - case 0x26: /* stqf */ - case 0x36: /* stqfa */ - case 0x25: /* stfsr */ - case 0x3c: /* casa */ - case 0x3e: /* casxa */ - is_write = 1; - break; - } - } - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, sigmask); -} - -#elif defined(__arm__) - -#if defined(__NetBSD__) -#include <ucontext.h> -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; -#if defined(__NetBSD__) - ucontext_t *uc = puc; -#else - ucontext_t *uc = puc; -#endif - unsigned long pc; - int is_write; - -#if defined(__NetBSD__) - pc = uc->uc_mcontext.__gregs[_REG_R15]; -#elif defined(__GLIBC__) && (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3)) - pc = uc->uc_mcontext.gregs[R15]; -#else - pc = uc->uc_mcontext.arm_pc; -#endif - - /* error_code is the FSR value, in which bit 11 is WnR (assuming a v6 or - * later processor; on v5 we will always report this as a read). - */ - is_write = extract32(uc->uc_mcontext.error_code, 11, 1); - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, - &uc->uc_sigmask); -} - -#elif defined(__aarch64__) - -int cpu_signal_handler(int host_signum, void *pinfo, void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - uintptr_t pc = uc->uc_mcontext.pc; - uint32_t insn = *(uint32_t *)pc; - bool is_write; - - /* XXX: need kernel patch to get write flag faster. */ - is_write = ( (insn & 0xbfff0000) == 0x0c000000 /* C3.3.1 */ - || (insn & 0xbfe00000) == 0x0c800000 /* C3.3.2 */ - || (insn & 0xbfdf0000) == 0x0d000000 /* C3.3.3 */ - || (insn & 0xbfc00000) == 0x0d800000 /* C3.3.4 */ - || (insn & 0x3f400000) == 0x08000000 /* C3.3.6 */ - || (insn & 0x3bc00000) == 0x39000000 /* C3.3.13 */ - || (insn & 0x3fc00000) == 0x3d800000 /* ... 128bit */ - /* Ignore bits 10, 11 & 21, controlling indexing. */ - || (insn & 0x3bc00000) == 0x38000000 /* C3.3.8-12 */ - || (insn & 0x3fe00000) == 0x3c800000 /* ... 128bit */ - /* Ignore bits 23 & 24, controlling indexing. */ - || (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */ - - return handle_cpu_signal(pc, (uintptr_t)info->si_addr, - is_write, &uc->uc_sigmask); -} - -#elif defined(__ia64) - -#ifndef __ISR_VALID - /* This ought to be in <bits/siginfo.h>...
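*/

For illustration (not part of the patch): ia64 is unusual among the hosts handled above in that the kernel supplies the read/write distinction directly -- ISR.W, bit 33 of the interruption status register, is copied into siginfo as si_isr -- whereas alpha, sparc and aarch64 must fetch the faulting instruction and pattern-match store opcodes. A minimal sketch of the extraction used below (the function name is ours):

#include <stdbool.h>
#include <stdint.h>

/* ISR.W ("faulting access was a write") is bit 33 of si_isr; it is
   only meaningful when __ISR_VALID is set in si_segvflags. */
static bool isr_is_write(uint64_t isr)
{
    return (isr >> 33) & 1;
}

/* Fallback for kernel headers that do not provide the flag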
*/ -# define __ISR_VALID 1 -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - unsigned long ip; - int is_write = 0; - - ip = uc->uc_mcontext.sc_ip; - switch (host_signum) { - case SIGILL: - case SIGFPE: - case SIGSEGV: - case SIGBUS: - case SIGTRAP: - if (info->si_code && (info->si_segvflags & __ISR_VALID)) { - /* ISR.W (write-access) is bit 33: */ - is_write = (info->si_isr >> 33) & 1; - } - break; - - default: - break; - } - return handle_cpu_signal(ip, (unsigned long)info->si_addr, - is_write, - (sigset_t *)&uc->uc_sigmask); -} - -#elif defined(__s390__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - unsigned long pc; - uint16_t *pinsn; - int is_write = 0; - - pc = uc->uc_mcontext.psw.addr; - - /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead - of the normal 2 arguments. The 3rd argument contains the "int_code" - from the hardware which does in fact contain the is_write value. - The rt signal handler, as far as I can tell, does not give this value - at all. Not that we could get to it from here even if it were. */ - /* ??? This is not even close to complete, since it ignores all - of the read-modify-write instructions. */ - pinsn = (uint16_t *)pc; - switch (pinsn[0] >> 8) { - case 0x50: /* ST */ - case 0x42: /* STC */ - case 0x40: /* STH */ - is_write = 1; - break; - case 0xc4: /* RIL format insns */ - switch (pinsn[0] & 0xf) { - case 0xf: /* STRL */ - case 0xb: /* STGRL */ - case 0x7: /* STHRL */ - is_write = 1; - } - break; - case 0xe3: /* RXY format insns */ - switch (pinsn[2] & 0xff) { - case 0x50: /* STY */ - case 0x24: /* STG */ - case 0x72: /* STCY */ - case 0x70: /* STHY */ - case 0x8e: /* STPQ */ - case 0x3f: /* STRVH */ - case 0x3e: /* STRV */ - case 0x2f: /* STRVG */ - is_write = 1; - } - break; - } - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, &uc->uc_sigmask); -} - -#elif defined(__mips__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - greg_t pc = uc->uc_mcontext.pc; - int is_write; - - /* XXX: compute is_write */ - is_write = 0; - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, &uc->uc_sigmask); -} - -#else - -#error host CPU specific signal handler needed - -#endif -- cgit v1.1 From 61a3f8f6c047620f4d55ade4f5607a9d019984c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Mon, 11 Sep 2017 18:33:26 -0300 Subject: accel/tcg: move tcg-runtime to accel/tcg/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Suggested-by: Paolo Bonzini Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20170911213328.9701-4-f4bug@amsat.org> Signed-off-by: Richard Henderson --- Makefile.target | 2 +- accel/tcg/Makefile.objs | 1 + accel/tcg/tcg-runtime.c | 234 ++++++++++++++++++++++++++++++++++++++++++++++++ accel/tcg/tcg-runtime.h | 136 ++++++++++++++++++++++++++++ tcg/tcg-runtime.c | 234 ------------------------------------------------ tcg/tcg-runtime.h | 136 ---------------------------- 6 files changed, 372 insertions(+), 371 deletions(-) create mode 100644 accel/tcg/tcg-runtime.c create mode 100644 accel/tcg/tcg-runtime.h delete mode 100644 tcg/tcg-runtime.c delete mode 100644 tcg/tcg-runtime.h diff --git a/Makefile.target b/Makefile.target index 520305b..6361f95 100644 --- a/Makefile.target +++ b/Makefile.target @@ -94,7 
+94,7 @@ all: $(PROGS) stap obj-y += exec.o obj-y += accel/ obj-$(CONFIG_TCG) += tcg/tcg.o tcg/tcg-op.o tcg/optimize.o -obj-$(CONFIG_TCG) += tcg/tcg-common.o tcg/tcg-runtime.o +obj-$(CONFIG_TCG) += tcg/tcg-common.o obj-$(CONFIG_TCG_INTERPRETER) += tcg/tci.o obj-$(CONFIG_TCG_INTERPRETER) += disas/tci.o obj-y += fpu/softfloat.o diff --git a/accel/tcg/Makefile.objs b/accel/tcg/Makefile.objs index f2422d0..228cd84 100644 --- a/accel/tcg/Makefile.objs +++ b/accel/tcg/Makefile.objs @@ -1,5 +1,6 @@ obj-$(CONFIG_SOFTMMU) += tcg-all.o obj-$(CONFIG_SOFTMMU) += cputlb.o +obj-y += tcg-runtime.o obj-y += cpu-exec.o cpu-exec-common.o translate-all.o obj-y += translator.o diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c new file mode 100644 index 0000000..3e23649 --- /dev/null +++ b/accel/tcg/tcg-runtime.c @@ -0,0 +1,234 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include "exec/exec-all.h" +#include "exec/tb-hash.h" +#include "disas/disas.h" +#include "exec/log.h" + +/* 32-bit helpers */ + +int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2) +{ + return arg1 / arg2; +} + +int32_t HELPER(rem_i32)(int32_t arg1, int32_t arg2) +{ + return arg1 % arg2; +} + +uint32_t HELPER(divu_i32)(uint32_t arg1, uint32_t arg2) +{ + return arg1 / arg2; +} + +uint32_t HELPER(remu_i32)(uint32_t arg1, uint32_t arg2) +{ + return arg1 % arg2; +} + +/* 64-bit helpers */ + +uint64_t HELPER(shl_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 << arg2; +} + +uint64_t HELPER(shr_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 >> arg2; +} + +int64_t HELPER(sar_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 >> arg2; +} + +int64_t HELPER(div_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 / arg2; +} + +int64_t HELPER(rem_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 % arg2; +} + +uint64_t HELPER(divu_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 / arg2; +} + +uint64_t HELPER(remu_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 % arg2; +} + +uint64_t HELPER(muluh_i64)(uint64_t arg1, uint64_t arg2) +{ + uint64_t l, h; + mulu64(&l, &h, arg1, arg2); + return h; +} + +int64_t HELPER(mulsh_i64)(int64_t arg1, int64_t arg2) +{ + uint64_t l, h; + muls64(&l, &h, arg1, arg2); + return h; +} + +uint32_t HELPER(clz_i32)(uint32_t arg, uint32_t zero_val) +{ + return arg ? clz32(arg) : zero_val; +} + +uint32_t HELPER(ctz_i32)(uint32_t arg, uint32_t zero_val) +{ + return arg ? ctz32(arg) : zero_val; +} + +uint64_t HELPER(clz_i64)(uint64_t arg, uint64_t zero_val) +{ + return arg ? clz64(arg) : zero_val; +} + +uint64_t HELPER(ctz_i64)(uint64_t arg, uint64_t zero_val) +{ + return arg ? ctz64(arg) : zero_val; +} + +uint32_t HELPER(clrsb_i32)(uint32_t arg) +{ + return clrsb32(arg); +} + +uint64_t HELPER(clrsb_i64)(uint64_t arg) +{ + return clrsb64(arg); +} + +uint32_t HELPER(ctpop_i32)(uint32_t arg) +{ + return ctpop32(arg); +} + +uint64_t HELPER(ctpop_i64)(uint64_t arg) +{ + return ctpop64(arg); +} + +void *HELPER(lookup_tb_ptr)(CPUArchState *env, target_ulong addr) +{ + CPUState *cpu = ENV_GET_CPU(env); + TranslationBlock *tb; + target_ulong cs_base, pc; + uint32_t flags, addr_hash; + + addr_hash = tb_jmp_cache_hash_func(addr); + tb = atomic_rcu_read(&cpu->tb_jmp_cache[addr_hash]); + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + + if (unlikely(!(tb + && tb->pc == addr + && tb->cs_base == cs_base + && tb->flags == flags + && tb->trace_vcpu_dstate == *cpu->trace_dstate))) { + tb = tb_htable_lookup(cpu, addr, cs_base, flags); + if (!tb) { + return tcg_ctx.code_gen_epilogue; + } + atomic_set(&cpu->tb_jmp_cache[addr_hash], tb); + } + + qemu_log_mask_and_addr(CPU_LOG_EXEC, addr, + "Chain %p [%d: " TARGET_FMT_lx "] %s\n", + tb->tc_ptr, cpu->cpu_index, addr, + lookup_symbol(addr)); + return tb->tc_ptr; +} + +void HELPER(exit_atomic)(CPUArchState *env) +{ + cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC()); +} + +#ifndef CONFIG_SOFTMMU +/* The softmmu versions of these helpers are in cputlb.c. */ + +/* Do not allow unaligned operations to proceed. Return the host address. */ +static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, + int size, uintptr_t retaddr) +{ + /* Enforce qemu required alignment. 
*/ + if (unlikely(addr & (size - 1))) { + cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr); + } + return g2h(addr); +} + +/* Macro to call the above, with local variables from the use context. */ +#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC()) + +#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) +#define EXTRA_ARGS + +#define DATA_SIZE 1 +#include "atomic_template.h" + +#define DATA_SIZE 2 +#include "atomic_template.h" + +#define DATA_SIZE 4 +#include "atomic_template.h" + +#ifdef CONFIG_ATOMIC64 +#define DATA_SIZE 8 +#include "atomic_template.h" +#endif + +/* The following is only callable from other helpers, and matches up + with the softmmu version. */ + +#ifdef CONFIG_ATOMIC128 + +#undef EXTRA_ARGS +#undef ATOMIC_NAME +#undef ATOMIC_MMU_LOOKUP + +#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr +#define ATOMIC_NAME(X) \ + HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) +#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr) + +#define DATA_SIZE 16 +#include "atomic_template.h" +#endif /* CONFIG_ATOMIC128 */ + +#endif /* !CONFIG_SOFTMMU */ diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h new file mode 100644 index 0000000..c41d38a --- /dev/null +++ b/accel/tcg/tcg-runtime.h @@ -0,0 +1,136 @@ +DEF_HELPER_FLAGS_2(div_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) +DEF_HELPER_FLAGS_2(rem_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) +DEF_HELPER_FLAGS_2(divu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(remu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) + +DEF_HELPER_FLAGS_2(div_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(rem_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(divu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(remu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(shl_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(shr_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) + +DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(clz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(ctz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(clz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(ctz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_1(clrsb_i32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(clrsb_i64, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(ctpop_i32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(ctpop_i64, TCG_CALL_NO_RWG_SE, i64, i64) + +DEF_HELPER_FLAGS_2(lookup_tb_ptr, TCG_CALL_NO_WG_SE, ptr, env, tl) + +DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env) + +#ifdef CONFIG_SOFTMMU + +DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +#ifdef CONFIG_ATOMIC64 +DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG, + i64, env, tl, i64, i64, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG, + i64, env, tl, i64, i64, i32) +#endif + +#ifdef CONFIG_ATOMIC64 +#define GEN_ATOMIC_HELPERS(NAME) \ + 
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \ + TCG_CALL_NO_WG, i64, env, tl, i64, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \ + TCG_CALL_NO_WG, i64, env, tl, i64, i32) +#else +#define GEN_ATOMIC_HELPERS(NAME) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) +#endif /* CONFIG_ATOMIC64 */ + +#else + +DEF_HELPER_FLAGS_4(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32) +DEF_HELPER_FLAGS_4(atomic_cmpxchgw_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32) +DEF_HELPER_FLAGS_4(atomic_cmpxchgw_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32) +DEF_HELPER_FLAGS_4(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32) +DEF_HELPER_FLAGS_4(atomic_cmpxchgl_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32) +#ifdef CONFIG_ATOMIC64 +DEF_HELPER_FLAGS_4(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64) +DEF_HELPER_FLAGS_4(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64) +#endif + +#ifdef CONFIG_ATOMIC64 +#define GEN_ATOMIC_HELPERS(NAME) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \ + TCG_CALL_NO_WG, i32, env, tl, i32) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_le), \ + TCG_CALL_NO_WG, i64, env, tl, i64) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_be), \ + TCG_CALL_NO_WG, i64, env, tl, i64) +#else +#define GEN_ATOMIC_HELPERS(NAME) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \ + TCG_CALL_NO_WG, i32, env, tl, i32) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32) \ + DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32) +#endif /* CONFIG_ATOMIC64 */ + +#endif /* CONFIG_SOFTMMU */ + +GEN_ATOMIC_HELPERS(fetch_add) +GEN_ATOMIC_HELPERS(fetch_and) +GEN_ATOMIC_HELPERS(fetch_or) +GEN_ATOMIC_HELPERS(fetch_xor) + +GEN_ATOMIC_HELPERS(add_fetch) +GEN_ATOMIC_HELPERS(and_fetch) +GEN_ATOMIC_HELPERS(or_fetch) +GEN_ATOMIC_HELPERS(xor_fetch) + +GEN_ATOMIC_HELPERS(xchg) + +#undef GEN_ATOMIC_HELPERS diff --git a/tcg/tcg-runtime.c b/tcg/tcg-runtime.c deleted file mode 100644 index 
3e23649..0000000 --- a/tcg/tcg-runtime.c +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Tiny Code Generator for QEMU - * - * Copyright (c) 2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#include "qemu/osdep.h" -#include "qemu/host-utils.h" -#include "cpu.h" -#include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" -#include "exec/exec-all.h" -#include "exec/tb-hash.h" -#include "disas/disas.h" -#include "exec/log.h" - -/* 32-bit helpers */ - -int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2) -{ - return arg1 / arg2; -} - -int32_t HELPER(rem_i32)(int32_t arg1, int32_t arg2) -{ - return arg1 % arg2; -} - -uint32_t HELPER(divu_i32)(uint32_t arg1, uint32_t arg2) -{ - return arg1 / arg2; -} - -uint32_t HELPER(remu_i32)(uint32_t arg1, uint32_t arg2) -{ - return arg1 % arg2; -} - -/* 64-bit helpers */ - -uint64_t HELPER(shl_i64)(uint64_t arg1, uint64_t arg2) -{ - return arg1 << arg2; -} - -uint64_t HELPER(shr_i64)(uint64_t arg1, uint64_t arg2) -{ - return arg1 >> arg2; -} - -int64_t HELPER(sar_i64)(int64_t arg1, int64_t arg2) -{ - return arg1 >> arg2; -} - -int64_t HELPER(div_i64)(int64_t arg1, int64_t arg2) -{ - return arg1 / arg2; -} - -int64_t HELPER(rem_i64)(int64_t arg1, int64_t arg2) -{ - return arg1 % arg2; -} - -uint64_t HELPER(divu_i64)(uint64_t arg1, uint64_t arg2) -{ - return arg1 / arg2; -} - -uint64_t HELPER(remu_i64)(uint64_t arg1, uint64_t arg2) -{ - return arg1 % arg2; -} - -uint64_t HELPER(muluh_i64)(uint64_t arg1, uint64_t arg2) -{ - uint64_t l, h; - mulu64(&l, &h, arg1, arg2); - return h; -} - -int64_t HELPER(mulsh_i64)(int64_t arg1, int64_t arg2) -{ - uint64_t l, h; - muls64(&l, &h, arg1, arg2); - return h; -} - -uint32_t HELPER(clz_i32)(uint32_t arg, uint32_t zero_val) -{ - return arg ? clz32(arg) : zero_val; -} - -uint32_t HELPER(ctz_i32)(uint32_t arg, uint32_t zero_val) -{ - return arg ? ctz32(arg) : zero_val; -} - -uint64_t HELPER(clz_i64)(uint64_t arg, uint64_t zero_val) -{ - return arg ? clz64(arg) : zero_val; -} - -uint64_t HELPER(ctz_i64)(uint64_t arg, uint64_t zero_val) -{ - return arg ? 
ctz64(arg) : zero_val; -} - -uint32_t HELPER(clrsb_i32)(uint32_t arg) -{ - return clrsb32(arg); -} - -uint64_t HELPER(clrsb_i64)(uint64_t arg) -{ - return clrsb64(arg); -} - -uint32_t HELPER(ctpop_i32)(uint32_t arg) -{ - return ctpop32(arg); -} - -uint64_t HELPER(ctpop_i64)(uint64_t arg) -{ - return ctpop64(arg); -} - -void *HELPER(lookup_tb_ptr)(CPUArchState *env, target_ulong addr) -{ - CPUState *cpu = ENV_GET_CPU(env); - TranslationBlock *tb; - target_ulong cs_base, pc; - uint32_t flags, addr_hash; - - addr_hash = tb_jmp_cache_hash_func(addr); - tb = atomic_rcu_read(&cpu->tb_jmp_cache[addr_hash]); - cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); - - if (unlikely(!(tb - && tb->pc == addr - && tb->cs_base == cs_base - && tb->flags == flags - && tb->trace_vcpu_dstate == *cpu->trace_dstate))) { - tb = tb_htable_lookup(cpu, addr, cs_base, flags); - if (!tb) { - return tcg_ctx.code_gen_epilogue; - } - atomic_set(&cpu->tb_jmp_cache[addr_hash], tb); - } - - qemu_log_mask_and_addr(CPU_LOG_EXEC, addr, - "Chain %p [%d: " TARGET_FMT_lx "] %s\n", - tb->tc_ptr, cpu->cpu_index, addr, - lookup_symbol(addr)); - return tb->tc_ptr; -} - -void HELPER(exit_atomic)(CPUArchState *env) -{ - cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC()); -} - -#ifndef CONFIG_SOFTMMU -/* The softmmu versions of these helpers are in cputlb.c. */ - -/* Do not allow unaligned operations to proceed. Return the host address. */ -static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, - int size, uintptr_t retaddr) -{ - /* Enforce qemu required alignment. */ - if (unlikely(addr & (size - 1))) { - cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr); - } - return g2h(addr); -} - -/* Macro to call the above, with local variables from the use context. */ -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC()) - -#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) -#define EXTRA_ARGS - -#define DATA_SIZE 1 -#include "atomic_template.h" - -#define DATA_SIZE 2 -#include "atomic_template.h" - -#define DATA_SIZE 4 -#include "atomic_template.h" - -#ifdef CONFIG_ATOMIC64 -#define DATA_SIZE 8 -#include "atomic_template.h" -#endif - -/* The following is only callable from other helpers, and matches up - with the softmmu version. 
*/ - -#ifdef CONFIG_ATOMIC128 - -#undef EXTRA_ARGS -#undef ATOMIC_NAME -#undef ATOMIC_MMU_LOOKUP - -#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr -#define ATOMIC_NAME(X) \ - HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr) - -#define DATA_SIZE 16 -#include "atomic_template.h" -#endif /* CONFIG_ATOMIC128 */ - -#endif /* !CONFIG_SOFTMMU */ diff --git a/tcg/tcg-runtime.h b/tcg/tcg-runtime.h deleted file mode 100644 index c41d38a..0000000 --- a/tcg/tcg-runtime.h +++ /dev/null @@ -1,136 +0,0 @@ -DEF_HELPER_FLAGS_2(div_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) -DEF_HELPER_FLAGS_2(rem_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) -DEF_HELPER_FLAGS_2(divu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) -DEF_HELPER_FLAGS_2(remu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) - -DEF_HELPER_FLAGS_2(div_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) -DEF_HELPER_FLAGS_2(rem_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) -DEF_HELPER_FLAGS_2(divu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(remu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) - -DEF_HELPER_FLAGS_2(shl_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(shr_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) - -DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) -DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) - -DEF_HELPER_FLAGS_2(clz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) -DEF_HELPER_FLAGS_2(ctz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) -DEF_HELPER_FLAGS_2(clz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(ctz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_1(clrsb_i32, TCG_CALL_NO_RWG_SE, i32, i32) -DEF_HELPER_FLAGS_1(clrsb_i64, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_FLAGS_1(ctpop_i32, TCG_CALL_NO_RWG_SE, i32, i32) -DEF_HELPER_FLAGS_1(ctpop_i64, TCG_CALL_NO_RWG_SE, i64, i64) - -DEF_HELPER_FLAGS_2(lookup_tb_ptr, TCG_CALL_NO_WG_SE, ptr, env, tl) - -DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env) - -#ifdef CONFIG_SOFTMMU - -DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) -DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) -DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) -DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) -DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) -#ifdef CONFIG_ATOMIC64 -DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG, - i64, env, tl, i64, i64, i32) -DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG, - i64, env, tl, i64, i64, i32) -#endif - -#ifdef CONFIG_ATOMIC64 -#define GEN_ATOMIC_HELPERS(NAME) \ - DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ - DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ - DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ - DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ - DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ - DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \ - TCG_CALL_NO_WG, i64, env, tl, i64, i32) \ - DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \ - TCG_CALL_NO_WG, i64, env, tl, i64, i32) -#else -#define GEN_ATOMIC_HELPERS(NAME) \ - 
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ - DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ - DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ - DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ - DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) -#endif /* CONFIG_ATOMIC64 */ - -#else - -DEF_HELPER_FLAGS_4(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32) -DEF_HELPER_FLAGS_4(atomic_cmpxchgw_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32) -DEF_HELPER_FLAGS_4(atomic_cmpxchgw_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32) -DEF_HELPER_FLAGS_4(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32) -DEF_HELPER_FLAGS_4(atomic_cmpxchgl_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32) -#ifdef CONFIG_ATOMIC64 -DEF_HELPER_FLAGS_4(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64) -DEF_HELPER_FLAGS_4(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64) -#endif - -#ifdef CONFIG_ATOMIC64 -#define GEN_ATOMIC_HELPERS(NAME) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \ - TCG_CALL_NO_WG, i32, env, tl, i32) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_le), \ - TCG_CALL_NO_WG, i64, env, tl, i64) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_be), \ - TCG_CALL_NO_WG, i64, env, tl, i64) -#else -#define GEN_ATOMIC_HELPERS(NAME) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \ - TCG_CALL_NO_WG, i32, env, tl, i32) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32) \ - DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32) -#endif /* CONFIG_ATOMIC64 */ - -#endif /* CONFIG_SOFTMMU */ - -GEN_ATOMIC_HELPERS(fetch_add) -GEN_ATOMIC_HELPERS(fetch_and) -GEN_ATOMIC_HELPERS(fetch_or) -GEN_ATOMIC_HELPERS(fetch_xor) - -GEN_ATOMIC_HELPERS(add_fetch) -GEN_ATOMIC_HELPERS(and_fetch) -GEN_ATOMIC_HELPERS(or_fetch) -GEN_ATOMIC_HELPERS(xor_fetch) - -GEN_ATOMIC_HELPERS(xchg) - -#undef GEN_ATOMIC_HELPERS -- cgit v1.1 From 10f7d4d53d54fbe054333aa8e75f67f20052647a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Mon, 11 Sep 2017 18:33:27 -0300 Subject: accel/tcg: move atomic_template.h to accel/tcg/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Philippe Mathieu-Daudé Tested-by: Thomas Huth Message-Id: <20170911213328.9701-5-f4bug@amsat.org> Signed-off-by: Richard Henderson --- accel/tcg/atomic_template.h | 215 ++++++++++++++++++++++++++++++++++++++++++++ atomic_template.h | 215 -------------------------------------------- 2 files changed, 215 insertions(+), 215 deletions(-) create mode 100644 accel/tcg/atomic_template.h delete mode 100644 atomic_template.h diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h new 
file mode 100644 index 0000000..b400b2a --- /dev/null +++ b/accel/tcg/atomic_template.h @@ -0,0 +1,215 @@ +/* + * Atomic helper templates + * Included from tcg-runtime.c and cputlb.c. + * + * Copyright (c) 2016 Red Hat, Inc + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#if DATA_SIZE == 16 +# define SUFFIX o +# define DATA_TYPE Int128 +# define BSWAP bswap128 +#elif DATA_SIZE == 8 +# define SUFFIX q +# define DATA_TYPE uint64_t +# define BSWAP bswap64 +#elif DATA_SIZE == 4 +# define SUFFIX l +# define DATA_TYPE uint32_t +# define BSWAP bswap32 +#elif DATA_SIZE == 2 +# define SUFFIX w +# define DATA_TYPE uint16_t +# define BSWAP bswap16 +#elif DATA_SIZE == 1 +# define SUFFIX b +# define DATA_TYPE uint8_t +# define BSWAP +#else +# error unsupported data size +#endif + +#if DATA_SIZE >= 4 +# define ABI_TYPE DATA_TYPE +#else +# define ABI_TYPE uint32_t +#endif + +/* Define host-endian atomic operations. Note that END is used within + the ATOMIC_NAME macro, and redefined below. */ +#if DATA_SIZE == 1 +# define END +#elif defined(HOST_WORDS_BIGENDIAN) +# define END _be +#else +# define END _le +#endif + +ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, + ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS) +{ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + return atomic_cmpxchg__nocheck(haddr, cmpv, newv); +} + +#if DATA_SIZE >= 16 +ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS) +{ + DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP; + __atomic_load(haddr, &val, __ATOMIC_RELAXED); + return val; +} + +void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, + ABI_TYPE val EXTRA_ARGS) +{ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + __atomic_store(haddr, &val, __ATOMIC_RELAXED); +} +#else +ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, + ABI_TYPE val EXTRA_ARGS) +{ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + return atomic_xchg__nocheck(haddr, val); +} + +#define GEN_ATOMIC_HELPER(X) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE val EXTRA_ARGS) \ +{ \ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ + return atomic_##X(haddr, val); \ +} \ + +GEN_ATOMIC_HELPER(fetch_add) +GEN_ATOMIC_HELPER(fetch_and) +GEN_ATOMIC_HELPER(fetch_or) +GEN_ATOMIC_HELPER(fetch_xor) +GEN_ATOMIC_HELPER(add_fetch) +GEN_ATOMIC_HELPER(and_fetch) +GEN_ATOMIC_HELPER(or_fetch) +GEN_ATOMIC_HELPER(xor_fetch) + +#undef GEN_ATOMIC_HELPER +#endif /* DATA SIZE >= 16 */ + +#undef END + +#if DATA_SIZE > 1 + +/* Define reverse-host-endian atomic operations. Note that END is used + within the ATOMIC_NAME macro.
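*/

For illustration (not part of the patch), why the byte-swapping helpers below are safe for the bitwise operations but not for addition: AND/OR/XOR act on each byte lane independently, so they commute with BSWAP, while addition propagates carries across byte lanes. A small self-contained demonstration (using the GCC/Clang __builtin_bswap32):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t a = 0x11223344, b = 0x000000ff;

    /* Bitwise ops commute with byte swapping ... */
    assert(__builtin_bswap32(a & b) ==
           (__builtin_bswap32(a) & __builtin_bswap32(b)));

    /* ... but addition does not (0x44 + 0xff carries into the next
       byte lane), hence the separate cmpxchg loops for fetch_add and
       add_fetch further below. */
    assert(__builtin_bswap32(a + b) !=
           (__builtin_bswap32(a) + __builtin_bswap32(b)));
    return 0;
}

/* Endianness suffix selection for the reverse-host-endian helpers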
*/ +#ifdef HOST_WORDS_BIGENDIAN +# define END _le +#else +# define END _be +#endif + +ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, + ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS) +{ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + return BSWAP(atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv))); +} + +#if DATA_SIZE >= 16 +ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS) +{ + DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP; + __atomic_load(haddr, &val, __ATOMIC_RELAXED); + return BSWAP(val); +} + +void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, + ABI_TYPE val EXTRA_ARGS) +{ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + val = BSWAP(val); + __atomic_store(haddr, &val, __ATOMIC_RELAXED); +} +#else +ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, + ABI_TYPE val EXTRA_ARGS) +{ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + return BSWAP(atomic_xchg__nocheck(haddr, BSWAP(val))); +} + +#define GEN_ATOMIC_HELPER(X) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE val EXTRA_ARGS) \ +{ \ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ + return BSWAP(atomic_##X(haddr, BSWAP(val))); \ +} + +GEN_ATOMIC_HELPER(fetch_and) +GEN_ATOMIC_HELPER(fetch_or) +GEN_ATOMIC_HELPER(fetch_xor) +GEN_ATOMIC_HELPER(and_fetch) +GEN_ATOMIC_HELPER(or_fetch) +GEN_ATOMIC_HELPER(xor_fetch) + +#undef GEN_ATOMIC_HELPER + +/* Note that for addition, we need to use a separate cmpxchg loop instead + of bswaps for the reverse-host-endian helpers. */ +ABI_TYPE ATOMIC_NAME(fetch_add)(CPUArchState *env, target_ulong addr, + ABI_TYPE val EXTRA_ARGS) +{ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE ldo, ldn, ret, sto; + + ldo = atomic_read__nocheck(haddr); + while (1) { + ret = BSWAP(ldo); + sto = BSWAP(ret + val); + ldn = atomic_cmpxchg__nocheck(haddr, ldo, sto); + if (ldn == ldo) { + return ret; + } + ldo = ldn; + } +} + +ABI_TYPE ATOMIC_NAME(add_fetch)(CPUArchState *env, target_ulong addr, + ABI_TYPE val EXTRA_ARGS) +{ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE ldo, ldn, ret, sto; + + ldo = atomic_read__nocheck(haddr); + while (1) { + ret = BSWAP(ldo) + val; + sto = BSWAP(ret); + ldn = atomic_cmpxchg__nocheck(haddr, ldo, sto); + if (ldn == ldo) { + return ret; + } + ldo = ldn; + } +} +#endif /* DATA_SIZE >= 16 */ + +#undef END +#endif /* DATA_SIZE > 1 */ + +#undef BSWAP +#undef ABI_TYPE +#undef DATA_TYPE +#undef SUFFIX +#undef DATA_SIZE diff --git a/atomic_template.h b/atomic_template.h deleted file mode 100644 index b400b2a..0000000 --- a/atomic_template.h +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Atomic helper templates - * Included from tcg-runtime.c and cputlb.c. - * - * Copyright (c) 2016 Red Hat, Inc - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */ - -#if DATA_SIZE == 16 -# define SUFFIX o -# define DATA_TYPE Int128 -# define BSWAP bswap128 -#elif DATA_SIZE == 8 -# define SUFFIX q -# define DATA_TYPE uint64_t -# define BSWAP bswap64 -#elif DATA_SIZE == 4 -# define SUFFIX l -# define DATA_TYPE uint32_t -# define BSWAP bswap32 -#elif DATA_SIZE == 2 -# define SUFFIX w -# define DATA_TYPE uint16_t -# define BSWAP bswap16 -#elif DATA_SIZE == 1 -# define SUFFIX b -# define DATA_TYPE uint8_t -# define BSWAP -#else -# error unsupported data size -#endif - -#if DATA_SIZE >= 4 -# define ABI_TYPE DATA_TYPE -#else -# define ABI_TYPE uint32_t -#endif - -/* Define host-endian atomic operations. Note that END is used within - the ATOMIC_NAME macro, and redefined below. */ -#if DATA_SIZE == 1 -# define END -#elif defined(HOST_WORDS_BIGENDIAN) -# define END _be -#else -# define END _le -#endif - -ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, - ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS) -{ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; - return atomic_cmpxchg__nocheck(haddr, cmpv, newv); -} - -#if DATA_SIZE >= 16 -ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS) -{ - DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP; - __atomic_load(haddr, &val, __ATOMIC_RELAXED); - return val; -} - -void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, - ABI_TYPE val EXTRA_ARGS) -{ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; - __atomic_store(haddr, &val, __ATOMIC_RELAXED); -} -#else -ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, - ABI_TYPE val EXTRA_ARGS) -{ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; - return atomic_xchg__nocheck(haddr, val); -} - -#define GEN_ATOMIC_HELPER(X) \ -ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ - ABI_TYPE val EXTRA_ARGS) \ -{ \ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ - return atomic_##X(haddr, val); \ -} \ - -GEN_ATOMIC_HELPER(fetch_add) -GEN_ATOMIC_HELPER(fetch_and) -GEN_ATOMIC_HELPER(fetch_or) -GEN_ATOMIC_HELPER(fetch_xor) -GEN_ATOMIC_HELPER(add_fetch) -GEN_ATOMIC_HELPER(and_fetch) -GEN_ATOMIC_HELPER(or_fetch) -GEN_ATOMIC_HELPER(xor_fetch) - -#undef GEN_ATOMIC_HELPER -#endif /* DATA SIZE >= 16 */ - -#undef END - -#if DATA_SIZE > 1 - -/* Define reverse-host-endian atomic operations. Note that END is used - within the ATOMIC_NAME macro. 
*/ -#ifdef HOST_WORDS_BIGENDIAN -# define END _le -#else -# define END _be -#endif - -ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, - ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS) -{ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; - return BSWAP(atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv))); -} - -#if DATA_SIZE >= 16 -ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS) -{ - DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP; - __atomic_load(haddr, &val, __ATOMIC_RELAXED); - return BSWAP(val); -} - -void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, - ABI_TYPE val EXTRA_ARGS) -{ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; - val = BSWAP(val); - __atomic_store(haddr, &val, __ATOMIC_RELAXED); -} -#else -ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, - ABI_TYPE val EXTRA_ARGS) -{ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; - return BSWAP(atomic_xchg__nocheck(haddr, BSWAP(val))); -} - -#define GEN_ATOMIC_HELPER(X) \ -ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ - ABI_TYPE val EXTRA_ARGS) \ -{ \ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ - return BSWAP(atomic_##X(haddr, BSWAP(val))); \ -} - -GEN_ATOMIC_HELPER(fetch_and) -GEN_ATOMIC_HELPER(fetch_or) -GEN_ATOMIC_HELPER(fetch_xor) -GEN_ATOMIC_HELPER(and_fetch) -GEN_ATOMIC_HELPER(or_fetch) -GEN_ATOMIC_HELPER(xor_fetch) - -#undef GEN_ATOMIC_HELPER - -/* Note that for addition, we need to use a separate cmpxchg loop instead - of bswaps for the reverse-host-endian helpers. */ -ABI_TYPE ATOMIC_NAME(fetch_add)(CPUArchState *env, target_ulong addr, - ABI_TYPE val EXTRA_ARGS) -{ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; - DATA_TYPE ldo, ldn, ret, sto; - - ldo = atomic_read__nocheck(haddr); - while (1) { - ret = BSWAP(ldo); - sto = BSWAP(ret + val); - ldn = atomic_cmpxchg__nocheck(haddr, ldo, sto); - if (ldn == ldo) { - return ret; - } - ldo = ldn; - } -} - -ABI_TYPE ATOMIC_NAME(add_fetch)(CPUArchState *env, target_ulong addr, - ABI_TYPE val EXTRA_ARGS) -{ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; - DATA_TYPE ldo, ldn, ret, sto; - - ldo = atomic_read__nocheck(haddr); - while (1) { - ret = BSWAP(ldo) + val; - sto = BSWAP(ret); - ldn = atomic_cmpxchg__nocheck(haddr, ldo, sto); - if (ldn == ldo) { - return ret; - } - ldo = ldn; - } -} -#endif /* DATA_SIZE >= 16 */ - -#undef END -#endif /* DATA_SIZE > 1 */ - -#undef BSWAP -#undef ABI_TYPE -#undef DATA_TYPE -#undef SUFFIX -#undef DATA_SIZE -- cgit v1.1 From a411d2963785929c3e47a48335b43219617edf2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Tue, 12 Sep 2017 18:19:34 -0300 Subject: accel/tcg: move USER code to user-exec.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Suggested-by: Paolo Bonzini Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20170912211934.20919-1-f4bug@amsat.org> Signed-off-by: Richard Henderson --- accel/tcg/tcg-runtime.c | 54 ------------------------------------------------- accel/tcg/user-exec.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 54 deletions(-) diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c index 3e23649..aafb171 100644 --- a/accel/tcg/tcg-runtime.c +++ b/accel/tcg/tcg-runtime.c @@ -178,57 +178,3 @@ void HELPER(exit_atomic)(CPUArchState *env) { cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC()); } - -#ifndef CONFIG_SOFTMMU -/* The softmmu versions of these helpers are in cputlb.c. */ - -/* Do not allow unaligned operations to proceed. Return the host address. 
*/ -static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, - int size, uintptr_t retaddr) -{ - /* Enforce qemu required alignment. */ - if (unlikely(addr & (size - 1))) { - cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr); - } - return g2h(addr); -} - -/* Macro to call the above, with local variables from the use context. */ -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC()) - -#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) -#define EXTRA_ARGS - -#define DATA_SIZE 1 -#include "atomic_template.h" - -#define DATA_SIZE 2 -#include "atomic_template.h" - -#define DATA_SIZE 4 -#include "atomic_template.h" - -#ifdef CONFIG_ATOMIC64 -#define DATA_SIZE 8 -#include "atomic_template.h" -#endif - -/* The following is only callable from other helpers, and matches up - with the softmmu version. */ - -#ifdef CONFIG_ATOMIC128 - -#undef EXTRA_ARGS -#undef ATOMIC_NAME -#undef ATOMIC_MMU_LOOKUP - -#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr -#define ATOMIC_NAME(X) \ - HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr) - -#define DATA_SIZE 16 -#include "atomic_template.h" -#endif /* CONFIG_ATOMIC128 */ - -#endif /* !CONFIG_SOFTMMU */ diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 2a975ea..492ea08 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -24,6 +24,7 @@ #include "qemu/bitops.h" #include "exec/cpu_ldst.h" #include "translate-all.h" +#include "exec/helper-proto.h" #undef EAX #undef ECX @@ -573,3 +574,54 @@ int cpu_signal_handler(int host_signum, void *pinfo, #error host CPU specific signal handler needed #endif + +/* The softmmu versions of these helpers are in cputlb.c. */ + +/* Do not allow unaligned operations to proceed. Return the host address. */ +static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, + int size, uintptr_t retaddr) +{ + /* Enforce qemu required alignment. */ + if (unlikely(addr & (size - 1))) { + cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr); + } + return g2h(addr); +} + +/* Macro to call the above, with local variables from the use context. */ +#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC()) + +#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) +#define EXTRA_ARGS + +#define DATA_SIZE 1 +#include "atomic_template.h" + +#define DATA_SIZE 2 +#include "atomic_template.h" + +#define DATA_SIZE 4 +#include "atomic_template.h" + +#ifdef CONFIG_ATOMIC64 +#define DATA_SIZE 8 +#include "atomic_template.h" +#endif + +/* The following is only callable from other helpers, and matches up + with the softmmu version. 
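*/

For illustration (not part of the patch): the templates instantiated above bake GETPC() into ATOMIC_MMU_LOOKUP, which is only valid in a helper invoked directly from generated code. The 128-bit variants are reached from other helpers instead, so EXTRA_ARGS and ATOMIC_NAME are redefined to pass the memop index and return address explicitly and to append an _mmu suffix. A compilable sketch of the token pasting (the SUFFIX/END values are picked for illustration; glue() mirrors QEMU's macro of the same name):

#include <stdio.h>

#define xglue(x, y) x ## y
#define glue(x, y)  xglue(x, y)
#define stringify2(x) #x
#define stringify(x)  stringify2(x)

#define SUFFIX o   /* the 16-byte size suffix */
#define END    _le

#define NAME_DIRECT(X) glue(glue(atomic_ ## X, SUFFIX), END)
#define NAME_MMU(X)    glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)

int main(void)
{
    puts(stringify(NAME_DIRECT(cmpxchg))); /* atomic_cmpxchgo_le     */
    puts(stringify(NAME_MMU(cmpxchg)));    /* atomic_cmpxchgo_le_mmu */
    return 0;
}

/* The 128-bit instantiation, only reachable from other helpers: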
*/ + +#ifdef CONFIG_ATOMIC128 + +#undef EXTRA_ARGS +#undef ATOMIC_NAME +#undef ATOMIC_MMU_LOOKUP + +#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr +#define ATOMIC_NAME(X) \ + HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) +#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr) + +#define DATA_SIZE 16 +#include "atomic_template.h" +#endif /* CONFIG_ATOMIC128 */ -- cgit v1.1 From be0f34b5840312bbe9627c2b9f68a25f32903dae Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Thu, 17 Aug 2017 07:43:20 -0700 Subject: tcg: Add tcg_op_supported MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/tcg.c | 227 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++- tcg/tcg.h | 2 + 2 files changed, 226 insertions(+), 3 deletions(-) diff --git a/tcg/tcg.c b/tcg/tcg.c index fd8a3df..b65a732 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -749,6 +749,229 @@ int tcg_check_temp_count(void) } #endif +/* Return true if OP may appear in the opcode stream. + Test the runtime variable that controls each opcode. */ +bool tcg_op_supported(TCGOpcode op) +{ + switch (op) { + case INDEX_op_discard: + case INDEX_op_set_label: + case INDEX_op_call: + case INDEX_op_br: + case INDEX_op_mb: + case INDEX_op_insn_start: + case INDEX_op_exit_tb: + case INDEX_op_goto_tb: + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_st_i64: + return true; + + case INDEX_op_goto_ptr: + return TCG_TARGET_HAS_goto_ptr; + + case INDEX_op_mov_i32: + case INDEX_op_movi_i32: + case INDEX_op_setcond_i32: + case INDEX_op_brcond_i32: + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + case INDEX_op_add_i32: + case INDEX_op_sub_i32: + case INDEX_op_mul_i32: + case INDEX_op_and_i32: + case INDEX_op_or_i32: + case INDEX_op_xor_i32: + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + return true; + + case INDEX_op_movcond_i32: + return TCG_TARGET_HAS_movcond_i32; + case INDEX_op_div_i32: + case INDEX_op_divu_i32: + return TCG_TARGET_HAS_div_i32; + case INDEX_op_rem_i32: + case INDEX_op_remu_i32: + return TCG_TARGET_HAS_rem_i32; + case INDEX_op_div2_i32: + case INDEX_op_divu2_i32: + return TCG_TARGET_HAS_div2_i32; + case INDEX_op_rotl_i32: + case INDEX_op_rotr_i32: + return TCG_TARGET_HAS_rot_i32; + case INDEX_op_deposit_i32: + return TCG_TARGET_HAS_deposit_i32; + case INDEX_op_extract_i32: + return TCG_TARGET_HAS_extract_i32; + case INDEX_op_sextract_i32: + return TCG_TARGET_HAS_sextract_i32; + case INDEX_op_add2_i32: + return TCG_TARGET_HAS_add2_i32; + case INDEX_op_sub2_i32: + return TCG_TARGET_HAS_sub2_i32; + case INDEX_op_mulu2_i32: + return TCG_TARGET_HAS_mulu2_i32; + case INDEX_op_muls2_i32: + return TCG_TARGET_HAS_muls2_i32; + case INDEX_op_muluh_i32: + return TCG_TARGET_HAS_muluh_i32; + case INDEX_op_mulsh_i32: + return TCG_TARGET_HAS_mulsh_i32; + case INDEX_op_ext8s_i32: + return TCG_TARGET_HAS_ext8s_i32; + case INDEX_op_ext16s_i32: + return TCG_TARGET_HAS_ext16s_i32; + case INDEX_op_ext8u_i32: + return TCG_TARGET_HAS_ext8u_i32; + case INDEX_op_ext16u_i32: + return TCG_TARGET_HAS_ext16u_i32; + case INDEX_op_bswap16_i32: + return TCG_TARGET_HAS_bswap16_i32; + case INDEX_op_bswap32_i32: + return TCG_TARGET_HAS_bswap32_i32; + case 
INDEX_op_not_i32: + return TCG_TARGET_HAS_not_i32; + case INDEX_op_neg_i32: + return TCG_TARGET_HAS_neg_i32; + case INDEX_op_andc_i32: + return TCG_TARGET_HAS_andc_i32; + case INDEX_op_orc_i32: + return TCG_TARGET_HAS_orc_i32; + case INDEX_op_eqv_i32: + return TCG_TARGET_HAS_eqv_i32; + case INDEX_op_nand_i32: + return TCG_TARGET_HAS_nand_i32; + case INDEX_op_nor_i32: + return TCG_TARGET_HAS_nor_i32; + case INDEX_op_clz_i32: + return TCG_TARGET_HAS_clz_i32; + case INDEX_op_ctz_i32: + return TCG_TARGET_HAS_ctz_i32; + case INDEX_op_ctpop_i32: + return TCG_TARGET_HAS_ctpop_i32; + + case INDEX_op_brcond2_i32: + case INDEX_op_setcond2_i32: + return TCG_TARGET_REG_BITS == 32; + + case INDEX_op_mov_i64: + case INDEX_op_movi_i64: + case INDEX_op_setcond_i64: + case INDEX_op_brcond_i64: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld_i64: + case INDEX_op_st8_i64: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: + case INDEX_op_add_i64: + case INDEX_op_sub_i64: + case INDEX_op_mul_i64: + case INDEX_op_and_i64: + case INDEX_op_or_i64: + case INDEX_op_xor_i64: + case INDEX_op_shl_i64: + case INDEX_op_shr_i64: + case INDEX_op_sar_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + return TCG_TARGET_REG_BITS == 64; + + case INDEX_op_movcond_i64: + return TCG_TARGET_HAS_movcond_i64; + case INDEX_op_div_i64: + case INDEX_op_divu_i64: + return TCG_TARGET_HAS_div_i64; + case INDEX_op_rem_i64: + case INDEX_op_remu_i64: + return TCG_TARGET_HAS_rem_i64; + case INDEX_op_div2_i64: + case INDEX_op_divu2_i64: + return TCG_TARGET_HAS_div2_i64; + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i64: + return TCG_TARGET_HAS_rot_i64; + case INDEX_op_deposit_i64: + return TCG_TARGET_HAS_deposit_i64; + case INDEX_op_extract_i64: + return TCG_TARGET_HAS_extract_i64; + case INDEX_op_sextract_i64: + return TCG_TARGET_HAS_sextract_i64; + case INDEX_op_extrl_i64_i32: + return TCG_TARGET_HAS_extrl_i64_i32; + case INDEX_op_extrh_i64_i32: + return TCG_TARGET_HAS_extrh_i64_i32; + case INDEX_op_ext8s_i64: + return TCG_TARGET_HAS_ext8s_i64; + case INDEX_op_ext16s_i64: + return TCG_TARGET_HAS_ext16s_i64; + case INDEX_op_ext32s_i64: + return TCG_TARGET_HAS_ext32s_i64; + case INDEX_op_ext8u_i64: + return TCG_TARGET_HAS_ext8u_i64; + case INDEX_op_ext16u_i64: + return TCG_TARGET_HAS_ext16u_i64; + case INDEX_op_ext32u_i64: + return TCG_TARGET_HAS_ext32u_i64; + case INDEX_op_bswap16_i64: + return TCG_TARGET_HAS_bswap16_i64; + case INDEX_op_bswap32_i64: + return TCG_TARGET_HAS_bswap32_i64; + case INDEX_op_bswap64_i64: + return TCG_TARGET_HAS_bswap64_i64; + case INDEX_op_not_i64: + return TCG_TARGET_HAS_not_i64; + case INDEX_op_neg_i64: + return TCG_TARGET_HAS_neg_i64; + case INDEX_op_andc_i64: + return TCG_TARGET_HAS_andc_i64; + case INDEX_op_orc_i64: + return TCG_TARGET_HAS_orc_i64; + case INDEX_op_eqv_i64: + return TCG_TARGET_HAS_eqv_i64; + case INDEX_op_nand_i64: + return TCG_TARGET_HAS_nand_i64; + case INDEX_op_nor_i64: + return TCG_TARGET_HAS_nor_i64; + case INDEX_op_clz_i64: + return TCG_TARGET_HAS_clz_i64; + case INDEX_op_ctz_i64: + return TCG_TARGET_HAS_ctz_i64; + case INDEX_op_ctpop_i64: + return TCG_TARGET_HAS_ctpop_i64; + case INDEX_op_add2_i64: + return TCG_TARGET_HAS_add2_i64; + case INDEX_op_sub2_i64: + return TCG_TARGET_HAS_sub2_i64; + case INDEX_op_mulu2_i64: + return TCG_TARGET_HAS_mulu2_i64; + case INDEX_op_muls2_i64: + return TCG_TARGET_HAS_muls2_i64; + 
case INDEX_op_muluh_i64: + return TCG_TARGET_HAS_muluh_i64; + case INDEX_op_mulsh_i64: + return TCG_TARGET_HAS_mulsh_i64; + + case NB_OPS: + break; + } + g_assert_not_reached(); +} + /* Note: we convert the 64 bit args to 32 bit and do some alignment and endian swap. Maybe it would be better to do the alignment and endian swap in tcg_reg_alloc_call(). */ @@ -2673,9 +2896,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) break; default: /* Sanity check that we've not introduced any unhandled opcodes. */ - if (def->flags & TCG_OPF_NOT_PRESENT) { - tcg_abort(); - } + tcg_debug_assert(tcg_op_supported(opc)); /* Note: in order to speed up the code, it would be much faster to have specialized register allocator functions for some common argument patterns */ diff --git a/tcg/tcg.h b/tcg/tcg.h index ac94133..e342fe6 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -932,6 +932,8 @@ do {\ #define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T)) #endif +bool tcg_op_supported(TCGOpcode op); + void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret, int nargs, TCGArg *args); -- cgit v1.1 From ccb1bb66ea2a42e773bfa04178d8b383ff86d4d8 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 11 Sep 2017 11:25:55 -0700 Subject: tcg: Remove tcg_regset_clear MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- tcg/aarch64/tcg-target.inc.c | 2 +- tcg/arm/tcg-target.inc.c | 2 +- tcg/i386/tcg-target.inc.c | 4 ++-- tcg/mips/tcg-target.inc.c | 2 +- tcg/ppc/tcg-target.inc.c | 2 +- tcg/s390/tcg-target.inc.c | 8 ++++---- tcg/sparc/tcg-target.inc.c | 2 +- tcg/tcg.c | 5 ++--- tcg/tcg.h | 1 - tcg/tci/tcg-target.inc.c | 2 +- 10 files changed, 14 insertions(+), 16 deletions(-) diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c index c2f3812..75d8192 100644 --- a/tcg/aarch64/tcg-target.inc.c +++ b/tcg/aarch64/tcg-target.inc.c @@ -1940,7 +1940,7 @@ static void tcg_target_init(TCGContext *s) (1 << TCG_REG_X16) | (1 << TCG_REG_X17) | (1 << TCG_REG_X18) | (1 << TCG_REG_X30)); - tcg_regset_clear(s->reserved_regs); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c index db46aea..f0c1765 100644 --- a/tcg/arm/tcg-target.inc.c +++ b/tcg/arm/tcg-target.inc.c @@ -2173,7 +2173,7 @@ static void tcg_target_init(TCGContext *s) (1 << TCG_REG_R12) | (1 << TCG_REG_R14)); - tcg_regset_clear(s->reserved_regs); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c index 5231056..0c19ab7 100644 --- a/tcg/i386/tcg-target.inc.c +++ b/tcg/i386/tcg-target.inc.c @@ -2649,7 +2649,7 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff); } - tcg_regset_clear(tcg_target_call_clobber_regs); + tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX); tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX); tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX); @@ -2664,7 +2664,7 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11); } - 
tcg_regset_clear(s->reserved_regs); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); } diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c index 750baad..85c1abd 100644 --- a/tcg/mips/tcg-target.inc.c +++ b/tcg/mips/tcg-target.inc.c @@ -2629,7 +2629,7 @@ static void tcg_target_init(TCGContext *s) (1 << TCG_REG_T8) | (1 << TCG_REG_T9)); - tcg_regset_clear(s->reserved_regs); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0); /* kernel use only */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */ diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 3c93558..44305ba 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -2788,7 +2788,7 @@ static void tcg_target_init(TCGContext *s) (1 << TCG_REG_R11) | (1 << TCG_REG_R12)); - tcg_regset_clear(s->reserved_regs); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */ #if defined(_CALL_SYSV) diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c index e7ab8e4..01baa33 100644 --- a/tcg/s390/tcg-target.inc.c +++ b/tcg/s390/tcg-target.inc.c @@ -413,12 +413,12 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, break; case 'a': /* force R2 for division */ ct->ct |= TCG_CT_REG; - tcg_regset_clear(ct->u.regs); + ct->u.regs = 0; tcg_regset_set_reg(ct->u.regs, TCG_REG_R2); break; case 'b': /* force R3 for division */ ct->ct |= TCG_CT_REG; - tcg_regset_clear(ct->u.regs); + ct->u.regs = 0; tcg_regset_set_reg(ct->u.regs, TCG_REG_R3); break; case 'A': @@ -2522,7 +2522,7 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff); tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff); - tcg_regset_clear(tcg_target_call_clobber_regs); + tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1); tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); @@ -2535,7 +2535,7 @@ static void tcg_target_init(TCGContext *s) /* The return register can be considered call-clobbered. */ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); - tcg_regset_clear(s->reserved_regs); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* XXX many insns can't be used with R0, so we better avoid it for now */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c index bd7c146..ccd8320 100644 --- a/tcg/sparc/tcg-target.inc.c +++ b/tcg/sparc/tcg-target.inc.c @@ -1771,7 +1771,7 @@ static void tcg_target_init(TCGContext *s) (1 << TCG_REG_O5) | (1 << TCG_REG_O7)); - tcg_regset_clear(s->reserved_regs); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */ diff --git a/tcg/tcg.c b/tcg/tcg.c index b65a732..9eeaba9 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -1500,7 +1500,7 @@ static void process_op_defs(TCGContext *s) /* Incomplete TCGTargetOpDef entry. 
*/ tcg_debug_assert(ct_str != NULL); - tcg_regset_clear(def->args_ct[i].u.regs); + def->args_ct[i].u.regs = 0; def->args_ct[i].ct = 0; while (*ct_str != '\0') { switch(*ct_str) { @@ -2664,9 +2664,8 @@ static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs, tcg_out_mov(s, ts->type, reg, ts->reg); } } else { - TCGRegSet arg_set; + TCGRegSet arg_set = 0; - tcg_regset_clear(arg_set); tcg_regset_set_reg(arg_set, reg); temp_load(s, ts, arg_set, allocated_regs); } diff --git a/tcg/tcg.h b/tcg/tcg.h index e342fe6..6525e51 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -186,7 +186,6 @@ typedef enum TCGOpcode { NB_OPS, } TCGOpcode; -#define tcg_regset_clear(d) (d) = 0 #define tcg_regset_set(d, s) (d) = (s) #define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg) #define tcg_regset_set_reg(d, r) (d) |= 1L << (r) diff --git a/tcg/tci/tcg-target.inc.c b/tcg/tci/tcg-target.inc.c index 94461b2..f964433 100644 --- a/tcg/tci/tcg-target.inc.c +++ b/tcg/tci/tcg-target.inc.c @@ -879,7 +879,7 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set32(tcg_target_call_clobber_regs, 0, BIT(TCG_TARGET_NB_REGS) - 1); - tcg_regset_clear(s->reserved_regs); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); /* We use negative offsets from "sp" so that we can distinguish -- cgit v1.1 From d21369f5fb41299d5e7b032ec6da12da7f95f72f Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 11 Sep 2017 11:58:44 -0700 Subject: tcg: Remove tcg_regset_set MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- tcg/mips/tcg-target.inc.c | 47 ++++++++++++++++++++++++----------------------- tcg/tcg.c | 8 ++++---- tcg/tcg.h | 1 - 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c index 85c1abd..1c09ec7 100644 --- a/tcg/mips/tcg-target.inc.c +++ b/tcg/mips/tcg-target.inc.c @@ -195,11 +195,11 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, switch(*ct_str++) { case 'r': ct->ct |= TCG_CT_REG; - tcg_regset_set(ct->u.regs, 0xffffffff); + ct->u.regs = 0xffffffff; break; case 'L': /* qemu_ld input arg constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set(ct->u.regs, 0xffffffff); + ct->u.regs = 0xffffffff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); #if defined(CONFIG_SOFTMMU) if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { @@ -209,7 +209,7 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, break; case 'S': /* qemu_st constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set(ct->u.regs, 0xffffffff); + ct->u.regs = 0xffffffff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); #if defined(CONFIG_SOFTMMU) if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { @@ -2607,27 +2607,28 @@ static void tcg_target_qemu_prologue(TCGContext *s) static void tcg_target_init(TCGContext *s) { tcg_target_detect_isa(); - tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff); + tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; if (TCG_TARGET_REG_BITS == 64) { - tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I64], 0xffffffff); - } - tcg_regset_set(tcg_target_call_clobber_regs, - (1 << TCG_REG_V0) | - (1 << TCG_REG_V1) | - (1 << TCG_REG_A0) | - (1 << TCG_REG_A1) | - (1 << TCG_REG_A2) | - (1 << TCG_REG_A3) | - (1 << TCG_REG_T0) | - (1 << TCG_REG_T1) | - (1 << TCG_REG_T2) | - (1 << TCG_REG_T3) | - (1 << TCG_REG_T4) | - (1 << TCG_REG_T5) | - (1 << TCG_REG_T6) | - (1 << TCG_REG_T7) | - (1 << 
TCG_REG_T8) | - (1 << TCG_REG_T9)); + tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; + } + + tcg_target_call_clobber_regs = 0; + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A0); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A1); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A2); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A3); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T0); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T1); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T2); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T3); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T4); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T5); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T6); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T7); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T8); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T9); s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */ diff --git a/tcg/tcg.c b/tcg/tcg.c index 9eeaba9..a4badcc 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -2362,7 +2362,7 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def, TCGTemp *ts, *ots; TCGType otype, itype; - tcg_regset_set(allocated_regs, s->reserved_regs); + allocated_regs = s->reserved_regs; ots = &s->temps[args[0]]; ts = &s->temps[args[1]]; @@ -2450,8 +2450,8 @@ static void tcg_reg_alloc_op(TCGContext *s, args + nb_oargs + nb_iargs, sizeof(TCGArg) * def->nb_cargs); - tcg_regset_set(i_allocated_regs, s->reserved_regs); - tcg_regset_set(o_allocated_regs, s->reserved_regs); + i_allocated_regs = s->reserved_regs; + o_allocated_regs = s->reserved_regs; /* satisfy input constraints */ for(k = 0; k < nb_iargs; k++) { @@ -2651,7 +2651,7 @@ static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs, } /* assign input registers */ - tcg_regset_set(allocated_regs, s->reserved_regs); + allocated_regs = s->reserved_regs; for(i = 0; i < nb_regs; i++) { arg = args[nb_oargs + i]; if (arg != TCG_CALL_DUMMY_ARG) { diff --git a/tcg/tcg.h b/tcg/tcg.h index 6525e51..5bf6767 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -186,7 +186,6 @@ typedef enum TCGOpcode { NB_OPS, } TCGOpcode; -#define tcg_regset_set(d, s) (d) = (s) #define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg) #define tcg_regset_set_reg(d, r) (d) |= 1L << (r) #define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r)) -- cgit v1.1 From 07ddf036fa66bca279590c09fe1c46bcdcc5bcff Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 11 Sep 2017 12:08:13 -0700 Subject: tcg: Remove tcg_regset_{or,and,andnot,not} MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- tcg/tcg.c | 2 +- tcg/tcg.h | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/tcg/tcg.c b/tcg/tcg.c index a4badcc..dff9999 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -2216,7 +2216,7 @@ static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs, TCGReg reg; TCGRegSet reg_ct; - tcg_regset_andnot(reg_ct, desired_regs, allocated_regs); + reg_ct = desired_regs & ~allocated_regs; order = rev ? 
indirect_reg_alloc_order : tcg_target_reg_alloc_order; /* first try free registers */ diff --git a/tcg/tcg.h b/tcg/tcg.h index 5bf6767..006e988 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -190,10 +190,6 @@ typedef enum TCGOpcode { #define tcg_regset_set_reg(d, r) (d) |= 1L << (r) #define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r)) #define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1) -#define tcg_regset_or(d, a, b) (d) = (a) | (b) -#define tcg_regset_and(d, a, b) (d) = (a) & (b) -#define tcg_regset_andnot(d, a, b) (d) = (a) & ~(b) -#define tcg_regset_not(d, a) (d) = ~(a) #ifndef TCG_TARGET_INSN_UNIT_SIZE # error "Missing TCG_TARGET_INSN_UNIT_SIZE" -- cgit v1.1 From f46934df662182097dce07d57ec00f37e4d2abf1 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 11 Sep 2017 12:44:30 -0700 Subject: tcg: Remove tcg_regset_set32 It's not even clear what the interface REG and VAL32 were supposed to mean. All uses had REG = 0 and VAL32 was the bitset assigned to the destination. Signed-off-by: Richard Henderson --- tcg/aarch64/tcg-target.inc.c | 33 +++++++++++++++--------------- tcg/arm/tcg-target.inc.c | 23 +++++++++++---------- tcg/i386/tcg-target.inc.c | 26 +++++++----------------- tcg/ppc/tcg-target.inc.c | 37 +++++++++++++++++----------------- tcg/s390/tcg-target.inc.c | 14 ++++++------- tcg/sparc/tcg-target.inc.c | 48 ++++++++++++++++++++++---------------------- tcg/tcg.h | 1 - tcg/tci/tcg-target.inc.c | 11 ++++------ 8 files changed, 90 insertions(+), 103 deletions(-) diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c index 75d8192..704fcdd 100644 --- a/tcg/aarch64/tcg-target.inc.c +++ b/tcg/aarch64/tcg-target.inc.c @@ -121,11 +121,11 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, switch (*ct_str++) { case 'r': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1); + ct->u.regs = 0xffffffffu; break; case 'l': /* qemu_ld / qemu_st address, data_reg */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1); + ct->u.regs = 0xffffffffu; #ifdef CONFIG_SOFTMMU /* x0 and x1 will be overwritten when reading the tlb entry, and x2, and x3 for helper args, better to avoid using them. 
*/ @@ -1925,20 +1925,21 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) static void tcg_target_init(TCGContext *s) { - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); - - tcg_regset_set32(tcg_target_call_clobber_regs, 0, - (1 << TCG_REG_X0) | (1 << TCG_REG_X1) | - (1 << TCG_REG_X2) | (1 << TCG_REG_X3) | - (1 << TCG_REG_X4) | (1 << TCG_REG_X5) | - (1 << TCG_REG_X6) | (1 << TCG_REG_X7) | - (1 << TCG_REG_X8) | (1 << TCG_REG_X9) | - (1 << TCG_REG_X10) | (1 << TCG_REG_X11) | - (1 << TCG_REG_X12) | (1 << TCG_REG_X13) | - (1 << TCG_REG_X14) | (1 << TCG_REG_X15) | - (1 << TCG_REG_X16) | (1 << TCG_REG_X17) | - (1 << TCG_REG_X18) | (1 << TCG_REG_X30)); + tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu; + tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu; + + tcg_target_call_clobber_regs = 0xfffffffu; + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X19); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X20); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X21); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X22); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X23); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X24); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X25); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X26); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X27); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X28); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X29); s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c index f0c1765..14599a8 100644 --- a/tcg/arm/tcg-target.inc.c +++ b/tcg/arm/tcg-target.inc.c @@ -264,13 +264,13 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, case 'r': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); + ct->u.regs = 0xffff; break; /* qemu_ld address */ case 'l': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); + ct->u.regs = 0xffff; #ifdef CONFIG_SOFTMMU /* r0-r2,lr will be overwritten when reading the tlb entry, so don't use these. */ @@ -284,7 +284,7 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, /* qemu_st address & data */ case 's': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); + ct->u.regs = 0xffff; /* r0-r2 will be overwritten when reading the tlb entry (softmmu only) and r0-r1 doing the byte swapping, so don't use these. 
*/ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); @@ -2164,14 +2164,15 @@ static void tcg_target_init(TCGContext *s) } } - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff); - tcg_regset_set32(tcg_target_call_clobber_regs, 0, - (1 << TCG_REG_R0) | - (1 << TCG_REG_R1) | - (1 << TCG_REG_R2) | - (1 << TCG_REG_R3) | - (1 << TCG_REG_R12) | - (1 << TCG_REG_R14)); + tcg_target_available_regs[TCG_TYPE_I32] = 0xffff; + + tcg_target_call_clobber_regs = 0; + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c index 0c19ab7..69e49c9 100644 --- a/tcg/i386/tcg-target.inc.c +++ b/tcg/i386/tcg-target.inc.c @@ -193,23 +193,15 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, break; case 'q': ct->ct |= TCG_CT_REG; - if (TCG_TARGET_REG_BITS == 64) { - tcg_regset_set32(ct->u.regs, 0, 0xffff); - } else { - tcg_regset_set32(ct->u.regs, 0, 0xf); - } + ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf; break; case 'Q': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xf); + ct->u.regs = 0xf; break; case 'r': ct->ct |= TCG_CT_REG; - if (TCG_TARGET_REG_BITS == 64) { - tcg_regset_set32(ct->u.regs, 0, 0xffff); - } else { - tcg_regset_set32(ct->u.regs, 0, 0xff); - } + ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff; break; case 'W': /* With TZCNT/LZCNT, we can have operand-size as an input. */ @@ -219,11 +211,7 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, /* qemu_ld/st address constraint */ case 'L': ct->ct |= TCG_CT_REG; - if (TCG_TARGET_REG_BITS == 64) { - tcg_regset_set32(ct->u.regs, 0, 0xffff); - } else { - tcg_regset_set32(ct->u.regs, 0, 0xff); - } + ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 
0xffff : 0xff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0); tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1); break; @@ -2643,10 +2631,10 @@ static void tcg_target_init(TCGContext *s) #endif /* CONFIG_CPUID_H */ if (TCG_TARGET_REG_BITS == 64) { - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff); - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff); + tcg_target_available_regs[TCG_TYPE_I32] = 0xffff; + tcg_target_available_regs[TCG_TYPE_I64] = 0xffff; } else { - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff); + tcg_target_available_regs[TCG_TYPE_I32] = 0xff; } tcg_target_call_clobber_regs = 0; diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 44305ba..8ffc7a7 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -260,11 +260,11 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, break; case 'r': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + ct->u.regs = 0xffffffff; break; case 'L': /* qemu_ld constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + ct->u.regs = 0xffffffff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); #ifdef CONFIG_SOFTMMU tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); @@ -273,7 +273,7 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, break; case 'S': /* qemu_st constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + ct->u.regs = 0xffffffff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); #ifdef CONFIG_SOFTMMU tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); @@ -2772,21 +2772,22 @@ static void tcg_target_init(TCGContext *s) } #endif - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); - tcg_regset_set32(tcg_target_call_clobber_regs, 0, - (1 << TCG_REG_R0) | - (1 << TCG_REG_R2) | - (1 << TCG_REG_R3) | - (1 << TCG_REG_R4) | - (1 << TCG_REG_R5) | - (1 << TCG_REG_R6) | - (1 << TCG_REG_R7) | - (1 << TCG_REG_R8) | - (1 << TCG_REG_R9) | - (1 << TCG_REG_R10) | - (1 << TCG_REG_R11) | - (1 << TCG_REG_R12)); + tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; + tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; + + tcg_target_call_clobber_regs = 0; + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12); s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */ diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c index 01baa33..38a7cda 100644 --- a/tcg/s390/tcg-target.inc.c +++ b/tcg/s390/tcg-target.inc.c @@ -402,14 +402,14 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, switch (*ct_str++) { case 'r': /* all registers */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffff); + ct->u.regs = 0xffff; break; case 
'L': /* qemu_ld/st constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffff); - tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2); - tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3); - tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4); + ct->u.regs = 0xffff; + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); break; case 'a': /* force R2 for division */ ct->ct |= TCG_CT_REG; @@ -2519,8 +2519,8 @@ static void tcg_target_init(TCGContext *s) { query_s390_facilities(); - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff); - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff); + tcg_target_available_regs[TCG_TYPE_I32] = 0xffff; + tcg_target_available_regs[TCG_TYPE_I64] = 0xffff; tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c index ccd8320..1da4deb 100644 --- a/tcg/sparc/tcg-target.inc.c +++ b/tcg/sparc/tcg-target.inc.c @@ -343,16 +343,15 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, switch (*ct_str++) { case 'r': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + ct->u.regs = 0xffffffff; break; case 'R': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, ALL_64); + ct->u.regs = ALL_64; break; case 'A': /* qemu_ld/st address constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, - TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff); + ct->u.regs = TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff; reserve_helpers: tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0); tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1); @@ -360,11 +359,11 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, break; case 's': /* qemu_st data 32-bit constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + ct->u.regs = 0xffffffff; goto reserve_helpers; case 'S': /* qemu_st data 64-bit constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, ALL_64); + ct->u.regs = ALL_64; goto reserve_helpers; case 'I': ct->ct |= TCG_CT_CONST_S11; @@ -1752,24 +1751,25 @@ static void tcg_target_init(TCGContext *s) } #endif - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64); - - tcg_regset_set32(tcg_target_call_clobber_regs, 0, - (1 << TCG_REG_G1) | - (1 << TCG_REG_G2) | - (1 << TCG_REG_G3) | - (1 << TCG_REG_G4) | - (1 << TCG_REG_G5) | - (1 << TCG_REG_G6) | - (1 << TCG_REG_G7) | - (1 << TCG_REG_O0) | - (1 << TCG_REG_O1) | - (1 << TCG_REG_O2) | - (1 << TCG_REG_O3) | - (1 << TCG_REG_O4) | - (1 << TCG_REG_O5) | - (1 << TCG_REG_O7)); + tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; + tcg_target_available_regs[TCG_TYPE_I64] = ALL_64; + + tcg_target_call_clobber_regs = 0; + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2); + 
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7); s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */ diff --git a/tcg/tcg.h b/tcg/tcg.h index 006e988..50dfd7c 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -186,7 +186,6 @@ typedef enum TCGOpcode { NB_OPS, } TCGOpcode; -#define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg) #define tcg_regset_set_reg(d, r) (d) |= 1L << (r) #define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r)) #define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1) diff --git a/tcg/tci/tcg-target.inc.c b/tcg/tci/tcg-target.inc.c index f964433..913c380 100644 --- a/tcg/tci/tcg-target.inc.c +++ b/tcg/tci/tcg-target.inc.c @@ -390,7 +390,7 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, case 'L': /* qemu_ld constraint */ case 'S': /* qemu_st constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, BIT(TCG_TARGET_NB_REGS) - 1); + ct->u.regs = BIT(TCG_TARGET_NB_REGS) - 1; break; default: return NULL; @@ -870,14 +870,11 @@ static void tcg_target_init(TCGContext *s) tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX); /* Registers available for 32 bit operations. */ - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, - BIT(TCG_TARGET_NB_REGS) - 1); + tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1; /* Registers available for 64 bit operations. */ - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, - BIT(TCG_TARGET_NB_REGS) - 1); + tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1; /* TODO: Which registers should be set here? */ - tcg_regset_set32(tcg_target_call_clobber_regs, 0, - BIT(TCG_TARGET_NB_REGS) - 1); + tcg_target_call_clobber_regs = BIT(TCG_TARGET_NB_REGS) - 1; s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); -- cgit v1.1 From 80a8b9a910e14d4a1937f70dce944891990f3441 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 11 Sep 2017 12:50:42 -0700 Subject: tcg: Fix types in tcg_regset_{set,reset}_reg There was a potential problem here with an ILP32 host with 64 host registers. 
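As a minimal sketch of the failure mode, assume a host where 'long'
is 32 bits while TCGRegSet is 64 bits wide; the types and names below
are illustrative stand-ins, not the real QEMU definitions:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    typedef uint64_t TCGRegSet;   /* assume 64 host registers */

    /* Old form: the shift is performed at the width of 'long', so on
       an ILP32 host "1L << r" is undefined for r >= 32 and the upper
       half of the register set can never be touched.  */
    #define OLD_SET(d, r)  ((d) |= 1L << (r))

    /* New form: widen the constant to TCGRegSet first, so the shift
       is done at the full 64-bit width for any r < 64.  */
    #define NEW_SET(d, r)  ((d) |= (TCGRegSet)1 << (r))

    int main(void)
    {
        TCGRegSet s = 0;
        NEW_SET(s, 40);                    /* well-defined: bit 40 set */
        printf("%016" PRIx64 "\n", s);     /* prints 0000010000000000 */
        return 0;
    }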
Signed-off-by: Richard Henderson --- tcg/tcg.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tcg/tcg.h b/tcg/tcg.h index 50dfd7c..25662c3 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -186,9 +186,9 @@ typedef enum TCGOpcode { NB_OPS, } TCGOpcode; -#define tcg_regset_set_reg(d, r) (d) |= 1L << (r) -#define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r)) -#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1) +#define tcg_regset_set_reg(d, r) ((d) |= (TCGRegSet)1 << (r)) +#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r))) +#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1) #ifndef TCG_TARGET_INSN_UNIT_SIZE # error "Missing TCG_TARGET_INSN_UNIT_SIZE" -- cgit v1.1 From 1897cc2eb8be2d8be23380b45a2d3c1a2808723f Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 11 Sep 2017 10:27:34 -0700 Subject: tcg/aarch64: Fully convert tcg_target_op_def Signed-off-by: Richard Henderson --- tcg/aarch64/tcg-target.inc.c | 282 +++++++++++++++++++++++-------------------- 1 file changed, 151 insertions(+), 131 deletions(-) diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c index 704fcdd..150530f 100644 --- a/tcg/aarch64/tcg-target.inc.c +++ b/tcg/aarch64/tcg-target.inc.c @@ -1786,141 +1786,161 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, #undef REG0 } -static const TCGTargetOpDef aarch64_op_defs[] = { - { INDEX_op_exit_tb, { } }, - { INDEX_op_goto_tb, { } }, - { INDEX_op_br, { } }, - { INDEX_op_goto_ptr, { "r" } }, - - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, - { INDEX_op_ld8u_i64, { "r", "r" } }, - { INDEX_op_ld8s_i64, { "r", "r" } }, - { INDEX_op_ld16u_i64, { "r", "r" } }, - { INDEX_op_ld16s_i64, { "r", "r" } }, - { INDEX_op_ld32u_i64, { "r", "r" } }, - { INDEX_op_ld32s_i64, { "r", "r" } }, - { INDEX_op_ld_i64, { "r", "r" } }, - - { INDEX_op_st8_i32, { "rZ", "r" } }, - { INDEX_op_st16_i32, { "rZ", "r" } }, - { INDEX_op_st_i32, { "rZ", "r" } }, - { INDEX_op_st8_i64, { "rZ", "r" } }, - { INDEX_op_st16_i64, { "rZ", "r" } }, - { INDEX_op_st32_i64, { "rZ", "r" } }, - { INDEX_op_st_i64, { "rZ", "r" } }, - - { INDEX_op_add_i32, { "r", "r", "rA" } }, - { INDEX_op_add_i64, { "r", "r", "rA" } }, - { INDEX_op_sub_i32, { "r", "r", "rA" } }, - { INDEX_op_sub_i64, { "r", "r", "rA" } }, - { INDEX_op_mul_i32, { "r", "r", "r" } }, - { INDEX_op_mul_i64, { "r", "r", "r" } }, - { INDEX_op_div_i32, { "r", "r", "r" } }, - { INDEX_op_div_i64, { "r", "r", "r" } }, - { INDEX_op_divu_i32, { "r", "r", "r" } }, - { INDEX_op_divu_i64, { "r", "r", "r" } }, - { INDEX_op_rem_i32, { "r", "r", "r" } }, - { INDEX_op_rem_i64, { "r", "r", "r" } }, - { INDEX_op_remu_i32, { "r", "r", "r" } }, - { INDEX_op_remu_i64, { "r", "r", "r" } }, - { INDEX_op_and_i32, { "r", "r", "rL" } }, - { INDEX_op_and_i64, { "r", "r", "rL" } }, - { INDEX_op_or_i32, { "r", "r", "rL" } }, - { INDEX_op_or_i64, { "r", "r", "rL" } }, - { INDEX_op_xor_i32, { "r", "r", "rL" } }, - { INDEX_op_xor_i64, { "r", "r", "rL" } }, - { INDEX_op_andc_i32, { "r", "r", "rL" } }, - { INDEX_op_andc_i64, { "r", "r", "rL" } }, - { INDEX_op_orc_i32, { "r", "r", "rL" } }, - { INDEX_op_orc_i64, { "r", "r", "rL" } }, - { INDEX_op_eqv_i32, { "r", "r", "rL" } }, - { INDEX_op_eqv_i64, { "r", "r", "rL" } }, - - { INDEX_op_neg_i32, { "r", "r" } }, - { INDEX_op_neg_i64, { "r", "r" } }, - { INDEX_op_not_i32, { "r", "r" } }, - { INDEX_op_not_i64, { "r", "r" } }, - - { 
INDEX_op_shl_i32, { "r", "r", "ri" } }, - { INDEX_op_shr_i32, { "r", "r", "ri" } }, - { INDEX_op_sar_i32, { "r", "r", "ri" } }, - { INDEX_op_rotl_i32, { "r", "r", "ri" } }, - { INDEX_op_rotr_i32, { "r", "r", "ri" } }, - { INDEX_op_clz_i32, { "r", "r", "rAL" } }, - { INDEX_op_ctz_i32, { "r", "r", "rAL" } }, - { INDEX_op_shl_i64, { "r", "r", "ri" } }, - { INDEX_op_shr_i64, { "r", "r", "ri" } }, - { INDEX_op_sar_i64, { "r", "r", "ri" } }, - { INDEX_op_rotl_i64, { "r", "r", "ri" } }, - { INDEX_op_rotr_i64, { "r", "r", "ri" } }, - { INDEX_op_clz_i64, { "r", "r", "rAL" } }, - { INDEX_op_ctz_i64, { "r", "r", "rAL" } }, - - { INDEX_op_brcond_i32, { "r", "rA" } }, - { INDEX_op_brcond_i64, { "r", "rA" } }, - { INDEX_op_setcond_i32, { "r", "r", "rA" } }, - { INDEX_op_setcond_i64, { "r", "r", "rA" } }, - { INDEX_op_movcond_i32, { "r", "r", "rA", "rZ", "rZ" } }, - { INDEX_op_movcond_i64, { "r", "r", "rA", "rZ", "rZ" } }, - - { INDEX_op_qemu_ld_i32, { "r", "l" } }, - { INDEX_op_qemu_ld_i64, { "r", "l" } }, - { INDEX_op_qemu_st_i32, { "lZ", "l" } }, - { INDEX_op_qemu_st_i64, { "lZ", "l" } }, - - { INDEX_op_bswap16_i32, { "r", "r" } }, - { INDEX_op_bswap32_i32, { "r", "r" } }, - { INDEX_op_bswap16_i64, { "r", "r" } }, - { INDEX_op_bswap32_i64, { "r", "r" } }, - { INDEX_op_bswap64_i64, { "r", "r" } }, - - { INDEX_op_ext8s_i32, { "r", "r" } }, - { INDEX_op_ext16s_i32, { "r", "r" } }, - { INDEX_op_ext8u_i32, { "r", "r" } }, - { INDEX_op_ext16u_i32, { "r", "r" } }, - - { INDEX_op_ext8s_i64, { "r", "r" } }, - { INDEX_op_ext16s_i64, { "r", "r" } }, - { INDEX_op_ext32s_i64, { "r", "r" } }, - { INDEX_op_ext8u_i64, { "r", "r" } }, - { INDEX_op_ext16u_i64, { "r", "r" } }, - { INDEX_op_ext32u_i64, { "r", "r" } }, - { INDEX_op_ext_i32_i64, { "r", "r" } }, - { INDEX_op_extu_i32_i64, { "r", "r" } }, - - { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, - { INDEX_op_deposit_i64, { "r", "0", "rZ" } }, - { INDEX_op_extract_i32, { "r", "r" } }, - { INDEX_op_extract_i64, { "r", "r" } }, - { INDEX_op_sextract_i32, { "r", "r" } }, - { INDEX_op_sextract_i64, { "r", "r" } }, - - { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, - { INDEX_op_add2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, - { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, - { INDEX_op_sub2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, - - { INDEX_op_muluh_i64, { "r", "r", "r" } }, - { INDEX_op_mulsh_i64, { "r", "r", "r" } }, - - { INDEX_op_mb, { } }, - { -1 }, -}; - static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) { - int i, n = ARRAY_SIZE(aarch64_op_defs); + static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; + static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; + static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } }; + static const TCGTargetOpDef r_rA = { .args_ct_str = { "r", "rA" } }; + static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } }; + static const TCGTargetOpDef lZ_l = { .args_ct_str = { "lZ", "l" } }; + static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; + static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; + static const TCGTargetOpDef r_r_rA = { .args_ct_str = { "r", "r", "rA" } }; + static const TCGTargetOpDef r_r_rL = { .args_ct_str = { "r", "r", "rL" } }; + static const TCGTargetOpDef r_r_rAL + = { .args_ct_str = { "r", "r", "rAL" } }; + static const TCGTargetOpDef dep + = { .args_ct_str = { "r", "0", "rZ" } }; + static const TCGTargetOpDef movc + = { .args_ct_str = { "r", "r", "rA", "rZ", "rZ" } }; + static const 
TCGTargetOpDef add2 + = { .args_ct_str = { "r", "r", "rZ", "rZ", "rA", "rMZ" } }; + + switch (op) { + case INDEX_op_goto_ptr: + return &r; - for (i = 0; i < n; ++i) { - if (aarch64_op_defs[i].op == op) { - return &aarch64_op_defs[i]; - } + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld_i64: + case INDEX_op_neg_i32: + case INDEX_op_neg_i64: + case INDEX_op_not_i32: + case INDEX_op_not_i64: + case INDEX_op_bswap16_i32: + case INDEX_op_bswap32_i32: + case INDEX_op_bswap16_i64: + case INDEX_op_bswap32_i64: + case INDEX_op_bswap64_i64: + case INDEX_op_ext8s_i32: + case INDEX_op_ext16s_i32: + case INDEX_op_ext8u_i32: + case INDEX_op_ext16u_i32: + case INDEX_op_ext8s_i64: + case INDEX_op_ext16s_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extract_i32: + case INDEX_op_extract_i64: + case INDEX_op_sextract_i32: + case INDEX_op_sextract_i64: + return &r_r; + + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + case INDEX_op_st8_i64: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: + return &rZ_r; + + case INDEX_op_add_i32: + case INDEX_op_add_i64: + case INDEX_op_sub_i32: + case INDEX_op_sub_i64: + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + return &r_r_rA; + + case INDEX_op_mul_i32: + case INDEX_op_mul_i64: + case INDEX_op_div_i32: + case INDEX_op_div_i64: + case INDEX_op_divu_i32: + case INDEX_op_divu_i64: + case INDEX_op_rem_i32: + case INDEX_op_rem_i64: + case INDEX_op_remu_i32: + case INDEX_op_remu_i64: + case INDEX_op_muluh_i64: + case INDEX_op_mulsh_i64: + return &r_r_r; + + case INDEX_op_and_i32: + case INDEX_op_and_i64: + case INDEX_op_or_i32: + case INDEX_op_or_i64: + case INDEX_op_xor_i32: + case INDEX_op_xor_i64: + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + case INDEX_op_orc_i32: + case INDEX_op_orc_i64: + case INDEX_op_eqv_i32: + case INDEX_op_eqv_i64: + return &r_r_rL; + + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + case INDEX_op_rotl_i32: + case INDEX_op_rotr_i32: + case INDEX_op_shl_i64: + case INDEX_op_shr_i64: + case INDEX_op_sar_i64: + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i64: + return &r_r_ri; + + case INDEX_op_clz_i32: + case INDEX_op_ctz_i32: + case INDEX_op_clz_i64: + case INDEX_op_ctz_i64: + return &r_r_rAL; + + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + return &r_rA; + + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: + return &movc; + + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + return &r_l; + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + return &lZ_l; + + case INDEX_op_deposit_i32: + case INDEX_op_deposit_i64: + return &dep; + + case INDEX_op_add2_i32: + case INDEX_op_add2_i64: + case INDEX_op_sub2_i32: + case INDEX_op_sub2_i64: + return &add2; + + default: + return NULL; } - return NULL; } static void tcg_target_init(TCGContext *s) -- cgit v1.1 From 7536b82d28876d1ffe0359667b28c93d49386fa0 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 13 Sep 2017 17:38:44 -0700 Subject: tcg/arm: Fully convert tcg_target_op_def Signed-off-by: Richard Henderson --- tcg/arm/tcg-target.inc.c | 186 
+++++++++++++++++++++++++++-------------------- 1 file changed, 107 insertions(+), 79 deletions(-) diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c index 14599a8..98a1253 100644 --- a/tcg/arm/tcg-target.inc.c +++ b/tcg/arm/tcg-target.inc.c @@ -2060,91 +2060,119 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, } } -static const TCGTargetOpDef arm_op_defs[] = { - { INDEX_op_exit_tb, { } }, - { INDEX_op_goto_tb, { } }, - { INDEX_op_br, { } }, - { INDEX_op_goto_ptr, { "r" } }, - - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, - { INDEX_op_st8_i32, { "r", "r" } }, - { INDEX_op_st16_i32, { "r", "r" } }, - { INDEX_op_st_i32, { "r", "r" } }, - - /* TODO: "r", "r", "ri" */ - { INDEX_op_add_i32, { "r", "r", "rIN" } }, - { INDEX_op_sub_i32, { "r", "rI", "rIN" } }, - { INDEX_op_mul_i32, { "r", "r", "r" } }, - { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } }, - { INDEX_op_muls2_i32, { "r", "r", "r", "r" } }, - { INDEX_op_and_i32, { "r", "r", "rIK" } }, - { INDEX_op_andc_i32, { "r", "r", "rIK" } }, - { INDEX_op_or_i32, { "r", "r", "rI" } }, - { INDEX_op_xor_i32, { "r", "r", "rI" } }, - { INDEX_op_neg_i32, { "r", "r" } }, - { INDEX_op_not_i32, { "r", "r" } }, - - { INDEX_op_shl_i32, { "r", "r", "ri" } }, - { INDEX_op_shr_i32, { "r", "r", "ri" } }, - { INDEX_op_sar_i32, { "r", "r", "ri" } }, - { INDEX_op_rotl_i32, { "r", "r", "ri" } }, - { INDEX_op_rotr_i32, { "r", "r", "ri" } }, - { INDEX_op_clz_i32, { "r", "r", "rIK" } }, - { INDEX_op_ctz_i32, { "r", "r", "rIK" } }, - - { INDEX_op_brcond_i32, { "r", "rIN" } }, - { INDEX_op_setcond_i32, { "r", "r", "rIN" } }, - { INDEX_op_movcond_i32, { "r", "r", "rIN", "rIK", "0" } }, - - { INDEX_op_add2_i32, { "r", "r", "r", "r", "rIN", "rIK" } }, - { INDEX_op_sub2_i32, { "r", "r", "rI", "rI", "rIN", "rIK" } }, - { INDEX_op_brcond2_i32, { "r", "r", "rIN", "rIN" } }, - { INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } }, - -#if TARGET_LONG_BITS == 32 - { INDEX_op_qemu_ld_i32, { "r", "l" } }, - { INDEX_op_qemu_ld_i64, { "r", "r", "l" } }, - { INDEX_op_qemu_st_i32, { "s", "s" } }, - { INDEX_op_qemu_st_i64, { "s", "s", "s" } }, -#else - { INDEX_op_qemu_ld_i32, { "r", "l", "l" } }, - { INDEX_op_qemu_ld_i64, { "r", "r", "l", "l" } }, - { INDEX_op_qemu_st_i32, { "s", "s", "s" } }, - { INDEX_op_qemu_st_i64, { "s", "s", "s", "s" } }, -#endif - - { INDEX_op_bswap16_i32, { "r", "r" } }, - { INDEX_op_bswap32_i32, { "r", "r" } }, - - { INDEX_op_ext8s_i32, { "r", "r" } }, - { INDEX_op_ext16s_i32, { "r", "r" } }, - { INDEX_op_ext16u_i32, { "r", "r" } }, +static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) +{ + static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; + static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; + static const TCGTargetOpDef s_s = { .args_ct_str = { "s", "s" } }; + static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } }; + static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; + static const TCGTargetOpDef r_r_l = { .args_ct_str = { "r", "r", "l" } }; + static const TCGTargetOpDef r_l_l = { .args_ct_str = { "r", "l", "l" } }; + static const TCGTargetOpDef s_s_s = { .args_ct_str = { "s", "s", "s" } }; + static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; + static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } }; + static const TCGTargetOpDef r_r_rIN + = { .args_ct_str = { "r", "r", 
"rIN" } }; + static const TCGTargetOpDef r_r_rIK + = { .args_ct_str = { "r", "r", "rIK" } }; + static const TCGTargetOpDef r_r_r_r + = { .args_ct_str = { "r", "r", "r", "r" } }; + static const TCGTargetOpDef r_r_l_l + = { .args_ct_str = { "r", "r", "l", "l" } }; + static const TCGTargetOpDef s_s_s_s + = { .args_ct_str = { "s", "s", "s", "s" } }; + static const TCGTargetOpDef br + = { .args_ct_str = { "r", "rIN" } }; + static const TCGTargetOpDef dep + = { .args_ct_str = { "r", "0", "rZ" } }; + static const TCGTargetOpDef movc + = { .args_ct_str = { "r", "r", "rIN", "rIK", "0" } }; + static const TCGTargetOpDef add2 + = { .args_ct_str = { "r", "r", "r", "r", "rIN", "rIK" } }; + static const TCGTargetOpDef sub2 + = { .args_ct_str = { "r", "r", "rI", "rI", "rIN", "rIK" } }; + static const TCGTargetOpDef br2 + = { .args_ct_str = { "r", "r", "rIN", "rIN" } }; + static const TCGTargetOpDef setc2 + = { .args_ct_str = { "r", "r", "r", "rIN", "rIN" } }; + + switch (op) { + case INDEX_op_goto_ptr: + return &r; - { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, - { INDEX_op_extract_i32, { "r", "r" } }, - { INDEX_op_sextract_i32, { "r", "r" } }, + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + case INDEX_op_neg_i32: + case INDEX_op_not_i32: + case INDEX_op_bswap16_i32: + case INDEX_op_bswap32_i32: + case INDEX_op_ext8s_i32: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16u_i32: + case INDEX_op_extract_i32: + case INDEX_op_sextract_i32: + return &r_r; - { INDEX_op_div_i32, { "r", "r", "r" } }, - { INDEX_op_divu_i32, { "r", "r", "r" } }, + case INDEX_op_add_i32: + case INDEX_op_sub_i32: + case INDEX_op_setcond_i32: + return &r_r_rIN; + case INDEX_op_and_i32: + case INDEX_op_andc_i32: + case INDEX_op_clz_i32: + case INDEX_op_ctz_i32: + return &r_r_rIK; + case INDEX_op_mul_i32: + case INDEX_op_div_i32: + case INDEX_op_divu_i32: + return &r_r_r; + case INDEX_op_mulu2_i32: + case INDEX_op_muls2_i32: + return &r_r_r_r; + case INDEX_op_or_i32: + case INDEX_op_xor_i32: + return &r_r_rI; + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + case INDEX_op_rotl_i32: + case INDEX_op_rotr_i32: + return &r_r_ri; - { INDEX_op_mb, { } }, - { -1 }, -}; + case INDEX_op_brcond_i32: + return &br; + case INDEX_op_deposit_i32: + return &dep; + case INDEX_op_movcond_i32: + return &movc; + case INDEX_op_add2_i32: + return &add2; + case INDEX_op_sub2_i32: + return &sub2; + case INDEX_op_brcond2_i32: + return &br2; + case INDEX_op_setcond2_i32: + return &setc2; -static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) -{ - int i, n = ARRAY_SIZE(arm_op_defs); + case INDEX_op_qemu_ld_i32: + return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l; + case INDEX_op_qemu_ld_i64: + return TARGET_LONG_BITS == 32 ? &r_r_l : &r_r_l_l; + case INDEX_op_qemu_st_i32: + return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s; + case INDEX_op_qemu_st_i64: + return TARGET_LONG_BITS == 32 ? 
&s_s_s : &s_s_s_s; - for (i = 0; i < n; ++i) { - if (arm_op_defs[i].op == op) { - return &arm_op_defs[i]; - } + default: + return NULL; } - return NULL; } static void tcg_target_init(TCGContext *s) -- cgit v1.1 From 6cb3658a04149b2c1fb92e2ea9d2e2f6cecc0014 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Thu, 14 Sep 2017 02:29:32 +0000 Subject: tcg/ppc: Fully convert tcg_target_op_def Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.inc.c | 321 +++++++++++++++++++++++++---------------------- 1 file changed, 168 insertions(+), 153 deletions(-) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 8ffc7a7..879885b 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -2596,166 +2596,181 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, } } -static const TCGTargetOpDef ppc_op_defs[] = { - { INDEX_op_exit_tb, { } }, - { INDEX_op_goto_tb, { } }, - { INDEX_op_br, { } }, - { INDEX_op_goto_ptr, { "r" } }, - - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, - - { INDEX_op_st8_i32, { "r", "r" } }, - { INDEX_op_st16_i32, { "r", "r" } }, - { INDEX_op_st_i32, { "r", "r" } }, - - { INDEX_op_add_i32, { "r", "r", "ri" } }, - { INDEX_op_mul_i32, { "r", "r", "rI" } }, - { INDEX_op_div_i32, { "r", "r", "r" } }, - { INDEX_op_divu_i32, { "r", "r", "r" } }, - { INDEX_op_sub_i32, { "r", "rI", "ri" } }, - { INDEX_op_and_i32, { "r", "r", "ri" } }, - { INDEX_op_or_i32, { "r", "r", "ri" } }, - { INDEX_op_xor_i32, { "r", "r", "ri" } }, - { INDEX_op_andc_i32, { "r", "r", "ri" } }, - { INDEX_op_orc_i32, { "r", "r", "ri" } }, - { INDEX_op_eqv_i32, { "r", "r", "ri" } }, - { INDEX_op_nand_i32, { "r", "r", "r" } }, - { INDEX_op_nor_i32, { "r", "r", "r" } }, - { INDEX_op_clz_i32, { "r", "r", "rZW" } }, - { INDEX_op_ctz_i32, { "r", "r", "rZW" } }, - { INDEX_op_ctpop_i32, { "r", "r" } }, - - { INDEX_op_shl_i32, { "r", "r", "ri" } }, - { INDEX_op_shr_i32, { "r", "r", "ri" } }, - { INDEX_op_sar_i32, { "r", "r", "ri" } }, - { INDEX_op_rotl_i32, { "r", "r", "ri" } }, - { INDEX_op_rotr_i32, { "r", "r", "ri" } }, - - { INDEX_op_neg_i32, { "r", "r" } }, - { INDEX_op_not_i32, { "r", "r" } }, - { INDEX_op_ext8s_i32, { "r", "r" } }, - { INDEX_op_ext16s_i32, { "r", "r" } }, - { INDEX_op_bswap16_i32, { "r", "r" } }, - { INDEX_op_bswap32_i32, { "r", "r" } }, - - { INDEX_op_brcond_i32, { "r", "ri" } }, - { INDEX_op_setcond_i32, { "r", "r", "ri" } }, - { INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } }, - - { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, - { INDEX_op_extract_i32, { "r", "r" } }, - - { INDEX_op_muluh_i32, { "r", "r", "r" } }, - { INDEX_op_mulsh_i32, { "r", "r", "r" } }, - -#if TCG_TARGET_REG_BITS == 64 - { INDEX_op_ld8u_i64, { "r", "r" } }, - { INDEX_op_ld8s_i64, { "r", "r" } }, - { INDEX_op_ld16u_i64, { "r", "r" } }, - { INDEX_op_ld16s_i64, { "r", "r" } }, - { INDEX_op_ld32u_i64, { "r", "r" } }, - { INDEX_op_ld32s_i64, { "r", "r" } }, - { INDEX_op_ld_i64, { "r", "r" } }, - - { INDEX_op_st8_i64, { "r", "r" } }, - { INDEX_op_st16_i64, { "r", "r" } }, - { INDEX_op_st32_i64, { "r", "r" } }, - { INDEX_op_st_i64, { "r", "r" } }, - - { INDEX_op_add_i64, { "r", "r", "rT" } }, - { INDEX_op_sub_i64, { "r", "rI", "rT" } }, - { INDEX_op_and_i64, { "r", "r", "ri" } }, - { INDEX_op_or_i64, { "r", "r", "rU" } }, - { INDEX_op_xor_i64, { "r", "r", "rU" } }, - { INDEX_op_andc_i64, { "r", "r", "ri" } }, - { 
INDEX_op_orc_i64, { "r", "r", "r" } },
-    { INDEX_op_eqv_i64, { "r", "r", "r" } },
-    { INDEX_op_nand_i64, { "r", "r", "r" } },
-    { INDEX_op_nor_i64, { "r", "r", "r" } },
-    { INDEX_op_clz_i64, { "r", "r", "rZW" } },
-    { INDEX_op_ctz_i64, { "r", "r", "rZW" } },
-    { INDEX_op_ctpop_i64, { "r", "r" } },
-
-    { INDEX_op_shl_i64, { "r", "r", "ri" } },
-    { INDEX_op_shr_i64, { "r", "r", "ri" } },
-    { INDEX_op_sar_i64, { "r", "r", "ri" } },
-    { INDEX_op_rotl_i64, { "r", "r", "ri" } },
-    { INDEX_op_rotr_i64, { "r", "r", "ri" } },
-
-    { INDEX_op_mul_i64, { "r", "r", "rI" } },
-    { INDEX_op_div_i64, { "r", "r", "r" } },
-    { INDEX_op_divu_i64, { "r", "r", "r" } },
-
-    { INDEX_op_neg_i64, { "r", "r" } },
-    { INDEX_op_not_i64, { "r", "r" } },
-    { INDEX_op_ext8s_i64, { "r", "r" } },
-    { INDEX_op_ext16s_i64, { "r", "r" } },
-    { INDEX_op_ext32s_i64, { "r", "r" } },
-    { INDEX_op_ext_i32_i64, { "r", "r" } },
-    { INDEX_op_extu_i32_i64, { "r", "r" } },
-    { INDEX_op_bswap16_i64, { "r", "r" } },
-    { INDEX_op_bswap32_i64, { "r", "r" } },
-    { INDEX_op_bswap64_i64, { "r", "r" } },
-
-    { INDEX_op_brcond_i64, { "r", "ri" } },
-    { INDEX_op_setcond_i64, { "r", "r", "ri" } },
-    { INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } },
-
-    { INDEX_op_deposit_i64, { "r", "0", "rZ" } },
-    { INDEX_op_extract_i64, { "r", "r" } },
-
-    { INDEX_op_mulsh_i64, { "r", "r", "r" } },
-    { INDEX_op_muluh_i64, { "r", "r", "r" } },
-#endif
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
+    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
+    static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
+    static const TCGTargetOpDef S_S = { .args_ct_str = { "S", "S" } };
+    static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
+    static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
+    static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
+    static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
+    static const TCGTargetOpDef S_S_S = { .args_ct_str = { "S", "S", "S" } };
+    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
+    static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
+    static const TCGTargetOpDef r_r_rT = { .args_ct_str = { "r", "r", "rT" } };
+    static const TCGTargetOpDef r_r_rU = { .args_ct_str = { "r", "r", "rU" } };
+    static const TCGTargetOpDef r_rI_ri
+        = { .args_ct_str = { "r", "rI", "ri" } };
+    static const TCGTargetOpDef r_rI_rT
+        = { .args_ct_str = { "r", "rI", "rT" } };
+    static const TCGTargetOpDef r_r_rZW
+        = { .args_ct_str = { "r", "r", "rZW" } };
+    static const TCGTargetOpDef L_L_L_L
+        = { .args_ct_str = { "L", "L", "L", "L" } };
+    static const TCGTargetOpDef S_S_S_S
+        = { .args_ct_str = { "S", "S", "S", "S" } };
+    static const TCGTargetOpDef movc
+        = { .args_ct_str = { "r", "r", "ri", "rZ", "rZ" } };
+    static const TCGTargetOpDef dep
+        = { .args_ct_str = { "r", "0", "rZ" } };
+    static const TCGTargetOpDef br2
+        = { .args_ct_str = { "r", "r", "ri", "ri" } };
+    static const TCGTargetOpDef setc2
+        = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
+    static const TCGTargetOpDef add2
+        = { .args_ct_str = { "r", "r", "r", "r", "rI", "rZM" } };
+    static const TCGTargetOpDef sub2
+        = { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } };
+
+    switch (op) {
+    case INDEX_op_goto_ptr:
+        return &r;
-#if TCG_TARGET_REG_BITS == 32
-    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
-    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
-#endif
+    case INDEX_op_ld8u_i32:
+    case INDEX_op_ld8s_i32:
+    case INDEX_op_ld16u_i32:
+    case INDEX_op_ld16s_i32:
+    case INDEX_op_ld_i32:
+    case INDEX_op_st8_i32:
+    case INDEX_op_st16_i32:
+    case INDEX_op_st_i32:
+    case INDEX_op_ctpop_i32:
+    case INDEX_op_neg_i32:
+    case INDEX_op_not_i32:
+    case INDEX_op_ext8s_i32:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_bswap16_i32:
+    case INDEX_op_bswap32_i32:
+    case INDEX_op_extract_i32:
+    case INDEX_op_ld8u_i64:
+    case INDEX_op_ld8s_i64:
+    case INDEX_op_ld16u_i64:
+    case INDEX_op_ld16s_i64:
+    case INDEX_op_ld32u_i64:
+    case INDEX_op_ld32s_i64:
+    case INDEX_op_ld_i64:
+    case INDEX_op_st8_i64:
+    case INDEX_op_st16_i64:
+    case INDEX_op_st32_i64:
+    case INDEX_op_st_i64:
+    case INDEX_op_ctpop_i64:
+    case INDEX_op_neg_i64:
+    case INDEX_op_not_i64:
+    case INDEX_op_ext8s_i64:
+    case INDEX_op_ext16s_i64:
+    case INDEX_op_ext32s_i64:
+    case INDEX_op_ext_i32_i64:
+    case INDEX_op_extu_i32_i64:
+    case INDEX_op_bswap16_i64:
+    case INDEX_op_bswap32_i64:
+    case INDEX_op_bswap64_i64:
+    case INDEX_op_extract_i64:
+        return &r_r;
-#if TCG_TARGET_REG_BITS == 64
-    { INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } },
-    { INDEX_op_sub2_i64, { "r", "r", "rI", "rZM", "r", "r" } },
-#else
-    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rI", "rZM" } },
-    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZM", "r", "r" } },
-#endif
+    case INDEX_op_add_i32:
+    case INDEX_op_and_i32:
+    case INDEX_op_or_i32:
+    case INDEX_op_xor_i32:
+    case INDEX_op_andc_i32:
+    case INDEX_op_orc_i32:
+    case INDEX_op_eqv_i32:
+    case INDEX_op_shl_i32:
+    case INDEX_op_shr_i32:
+    case INDEX_op_sar_i32:
+    case INDEX_op_rotl_i32:
+    case INDEX_op_rotr_i32:
+    case INDEX_op_setcond_i32:
+    case INDEX_op_and_i64:
+    case INDEX_op_andc_i64:
+    case INDEX_op_shl_i64:
+    case INDEX_op_shr_i64:
+    case INDEX_op_sar_i64:
+    case INDEX_op_rotl_i64:
+    case INDEX_op_rotr_i64:
+    case INDEX_op_setcond_i64:
+        return &r_r_ri;
+    case INDEX_op_mul_i32:
+    case INDEX_op_mul_i64:
+        return &r_r_rI;
+    case INDEX_op_div_i32:
+    case INDEX_op_divu_i32:
+    case INDEX_op_nand_i32:
+    case INDEX_op_nor_i32:
+    case INDEX_op_muluh_i32:
+    case INDEX_op_mulsh_i32:
+    case INDEX_op_orc_i64:
+    case INDEX_op_eqv_i64:
+    case INDEX_op_nand_i64:
+    case INDEX_op_nor_i64:
+    case INDEX_op_div_i64:
+    case INDEX_op_divu_i64:
+    case INDEX_op_mulsh_i64:
+    case INDEX_op_muluh_i64:
+        return &r_r_r;
+    case INDEX_op_sub_i32:
+        return &r_rI_ri;
+    case INDEX_op_add_i64:
+        return &r_r_rT;
+    case INDEX_op_or_i64:
+    case INDEX_op_xor_i64:
+        return &r_r_rU;
+    case INDEX_op_sub_i64:
+        return &r_rI_rT;
+    case INDEX_op_clz_i32:
+    case INDEX_op_ctz_i32:
+    case INDEX_op_clz_i64:
+    case INDEX_op_ctz_i64:
+        return &r_r_rZW;
-#if TCG_TARGET_REG_BITS == 64
-    { INDEX_op_qemu_ld_i32, { "r", "L" } },
-    { INDEX_op_qemu_st_i32, { "S", "S" } },
-    { INDEX_op_qemu_ld_i64, { "r", "L" } },
-    { INDEX_op_qemu_st_i64, { "S", "S" } },
-#elif TARGET_LONG_BITS == 32
-    { INDEX_op_qemu_ld_i32, { "r", "L" } },
-    { INDEX_op_qemu_st_i32, { "S", "S" } },
-    { INDEX_op_qemu_ld_i64, { "L", "L", "L" } },
-    { INDEX_op_qemu_st_i64, { "S", "S", "S" } },
-#else
-    { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
-    { INDEX_op_qemu_st_i32, { "S", "S", "S" } },
-    { INDEX_op_qemu_ld_i64, { "L", "L", "L", "L" } },
-    { INDEX_op_qemu_st_i64, { "S", "S", "S", "S" } },
-#endif
+    case INDEX_op_brcond_i32:
+    case INDEX_op_brcond_i64:
+        return &r_ri;
-    { INDEX_op_mb, { } },
-    { -1 },
-};
+    case INDEX_op_movcond_i32:
+    case INDEX_op_movcond_i64:
+        return &movc;
+    case INDEX_op_deposit_i32:
+    case INDEX_op_deposit_i64:
+        return &dep;
+    case INDEX_op_brcond2_i32:
+        return &br2;
+    case INDEX_op_setcond2_i32:
+        return &setc2;
+    case INDEX_op_add2_i64:
+    case INDEX_op_add2_i32:
+        return &add2;
+    case INDEX_op_sub2_i64:
+    case INDEX_op_sub2_i32:
+        return &sub2;
-static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
-{
-    int i, n = ARRAY_SIZE(ppc_op_defs);
+    case INDEX_op_qemu_ld_i32:
+        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
+                ? &r_L : &r_L_L);
+    case INDEX_op_qemu_st_i32:
+        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
+                ? &S_S : &S_S_S);
+    case INDEX_op_qemu_ld_i64:
+        return (TCG_TARGET_REG_BITS == 64 ? &r_L
+                : TARGET_LONG_BITS == 32 ? &L_L_L : &L_L_L_L);
+    case INDEX_op_qemu_st_i64:
+        return (TCG_TARGET_REG_BITS == 64 ? &S_S
+                : TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S);
-    for (i = 0; i < n; ++i) {
-        if (ppc_op_defs[i].op == op) {
-            return &ppc_op_defs[i];
-        }
+    default:
+        return NULL;
     }
-    return NULL;
 }
 
 static void tcg_target_init(TCGContext *s)
-- 
cgit v1.1
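
A note on the shape shared by these conversions, since the hunks in
this part of the series are large: each one replaces a
{ -1 }-terminated constraint table that was searched linearly with
per-constraint-set static objects and a switch.  The sketch below is
illustrative only; TCGTargetOpDef, ARRAY_SIZE and the INDEX_op_* names
are real, but the two-opcode "demo" backend and its function names are
hypothetical.

    /* Before: one array entry per opcode, scanned on every lookup. */
    static const TCGTargetOpDef demo_op_defs[] = {
        { INDEX_op_goto_ptr, { "r" } },
        { INDEX_op_ld_i32, { "r", "r" } },
        { -1 },
    };

    static const TCGTargetOpDef *demo_op_def_scan(TCGOpcode op)
    {
        int i, n = ARRAY_SIZE(demo_op_defs);

        for (i = 0; i < n; ++i) {
            if (demo_op_defs[i].op == op) {
                return &demo_op_defs[i];
            }
        }
        return NULL;
    }

    /* After: each distinct constraint set exists exactly once as a
     * static object, shared by every opcode that needs it, and the
     * dispatch is a switch the compiler can lower to a jump table.
     */
    static const TCGTargetOpDef *demo_op_def_switch(TCGOpcode op)
    {
        static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
        static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };

        switch (op) {
        case INDEX_op_goto_ptr:
            return &r;
        case INDEX_op_ld_i32:
            return &r_r;
        default:
            return NULL;
        }
    }

A side effect visible in the ppc hunk above is that the #if/#elif
blocks become plain C conditionals on TCG_TARGET_REG_BITS and
TARGET_LONG_BITS; both are compile-time constants, so the ternaries
fold away while keeping every arm visible to the compiler.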
From 9be44a16c258287aab5a3accda153d3a5144359f Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Thu, 14 Sep 2017 06:50:36 +0300
Subject: tcg/sparc: Fully convert tcg_target_op_def

Signed-off-by: Richard Henderson
---
 tcg/sparc/tcg-target.inc.c | 239 ++++++++++++++++++++++++++------------------
 1 file changed, 137 insertions(+), 102 deletions(-)

diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
index 1da4deb..bc673bd 100644
--- a/tcg/sparc/tcg-target.inc.c
+++ b/tcg/sparc/tcg-target.inc.c
@@ -1632,112 +1632,147 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     }
 }
 
-static const TCGTargetOpDef sparc_op_defs[] = {
-    { INDEX_op_exit_tb, { } },
-    { INDEX_op_goto_tb, { } },
-    { INDEX_op_br, { } },
-    { INDEX_op_goto_ptr, { "r" } },
-
-    { INDEX_op_ld8u_i32, { "r", "r" } },
-    { INDEX_op_ld8s_i32, { "r", "r" } },
-    { INDEX_op_ld16u_i32, { "r", "r" } },
-    { INDEX_op_ld16s_i32, { "r", "r" } },
-    { INDEX_op_ld_i32, { "r", "r" } },
-    { INDEX_op_st8_i32, { "rZ", "r" } },
-    { INDEX_op_st16_i32, { "rZ", "r" } },
-    { INDEX_op_st_i32, { "rZ", "r" } },
-
-    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },
-
-    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },
-
-    { INDEX_op_neg_i32, { "r", "rJ" } },
-    { INDEX_op_not_i32, { "r", "rJ" } },
-
-    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
-    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },
-
-    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
-    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
-    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
-    { INDEX_op_muls2_i32, { "r", "r", "rZ", "rJ" } },
-
-    { INDEX_op_ld8u_i64, { "R", "r" } },
-    { INDEX_op_ld8s_i64, { "R", "r" } },
-    { INDEX_op_ld16u_i64, { "R", "r" } },
-    { INDEX_op_ld16s_i64, { "R", "r" } },
-    { INDEX_op_ld32u_i64, { "R", "r" } },
-    { INDEX_op_ld32s_i64, { "R", "r" } },
-    { INDEX_op_ld_i64, { "R", "r" } },
-    { INDEX_op_st8_i64, { "RZ", "r" } },
-    { INDEX_op_st16_i64, { "RZ", "r" } },
-    { INDEX_op_st32_i64, { "RZ", "r" } },
-    { INDEX_op_st_i64, { "RZ", "r" } },
-
-    { INDEX_op_add_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_mul_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_div_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_divu_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_sub_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_and_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_andc_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_or_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_orc_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_xor_i64, { "R", "RZ", "RJ" } },
-
-    { INDEX_op_shl_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_shr_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_sar_i64, { "R", "RZ", "RJ" } },
-
-    { INDEX_op_neg_i64, { "R", "RJ" } },
-    { INDEX_op_not_i64, { "R", "RJ" } },
-
-    { INDEX_op_ext32s_i64, { "R", "R" } },
-    { INDEX_op_ext32u_i64, { "R", "R" } },
-    { INDEX_op_ext_i32_i64, { "R", "r" } },
-    { INDEX_op_extu_i32_i64, { "R", "r" } },
-    { INDEX_op_extrl_i64_i32, { "r", "R" } },
-    { INDEX_op_extrh_i64_i32, { "r", "R" } },
-
-    { INDEX_op_brcond_i64, { "RZ", "RJ" } },
-    { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
-    { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } },
-
-    { INDEX_op_add2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
-    { INDEX_op_sub2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
-    { INDEX_op_muluh_i64, { "R", "RZ", "RZ" } },
-
-    { INDEX_op_qemu_ld_i32, { "r", "A" } },
-    { INDEX_op_qemu_ld_i64, { "R", "A" } },
-    { INDEX_op_qemu_st_i32, { "sZ", "A" } },
-    { INDEX_op_qemu_st_i64, { "SZ", "A" } },
-
-    { INDEX_op_mb, { } },
-    { -1 },
-};
-
 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
 {
-    int i, n = ARRAY_SIZE(sparc_op_defs);
+    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
+    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
+    static const TCGTargetOpDef R_r = { .args_ct_str = { "R", "r" } };
+    static const TCGTargetOpDef r_R = { .args_ct_str = { "r", "R" } };
+    static const TCGTargetOpDef R_R = { .args_ct_str = { "R", "R" } };
+    static const TCGTargetOpDef r_A = { .args_ct_str = { "r", "A" } };
+    static const TCGTargetOpDef R_A = { .args_ct_str = { "R", "A" } };
+    static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } };
+    static const TCGTargetOpDef RZ_r = { .args_ct_str = { "RZ", "r" } };
+    static const TCGTargetOpDef sZ_A = { .args_ct_str = { "sZ", "A" } };
+    static const TCGTargetOpDef SZ_A = { .args_ct_str = { "SZ", "A" } };
+    static const TCGTargetOpDef rZ_rJ = { .args_ct_str = { "rZ", "rJ" } };
+    static const TCGTargetOpDef RZ_RJ = { .args_ct_str = { "RZ", "RJ" } };
+    static const TCGTargetOpDef R_R_R = { .args_ct_str = { "R", "R", "R" } };
+    static const TCGTargetOpDef r_rZ_rJ
+        = { .args_ct_str = { "r", "rZ", "rJ" } };
+    static const TCGTargetOpDef R_RZ_RJ
+        = { .args_ct_str = { "R", "RZ", "RJ" } };
+    static const TCGTargetOpDef r_r_rZ_rJ
+        = { .args_ct_str = { "r", "r", "rZ", "rJ" } };
+    static const TCGTargetOpDef movc_32
+        = { .args_ct_str = { "r", "rZ", "rJ", "rI", "0" } };
+    static const TCGTargetOpDef movc_64
+        = { .args_ct_str = { "R", "RZ", "RJ", "RI", "0" } };
+    static const TCGTargetOpDef add2_32
+        = { .args_ct_str = { "r", "r", "rZ", "rZ", "rJ", "rJ" } };
+    static const TCGTargetOpDef add2_64
+        = { .args_ct_str = { "R", "R", "RZ", "RZ", "RJ", "RI" } };
+
+    switch (op) {
+    case INDEX_op_goto_ptr:
+        return &r;
-    for (i = 0; i < n; ++i) {
-        if (sparc_op_defs[i].op == op) {
-            return &sparc_op_defs[i];
-        }
+    case INDEX_op_ld8u_i32:
+    case INDEX_op_ld8s_i32:
+    case INDEX_op_ld16u_i32:
+    case INDEX_op_ld16s_i32:
+    case INDEX_op_ld_i32:
+    case INDEX_op_neg_i32:
+    case INDEX_op_not_i32:
+        return &r_r;
+
+    case INDEX_op_st8_i32:
+    case INDEX_op_st16_i32:
+    case INDEX_op_st_i32:
+        return &rZ_r;
+
+    case INDEX_op_add_i32:
+    case INDEX_op_mul_i32:
+    case INDEX_op_div_i32:
+    case INDEX_op_divu_i32:
+    case INDEX_op_sub_i32:
+    case INDEX_op_and_i32:
+    case INDEX_op_andc_i32:
+    case INDEX_op_or_i32:
+    case INDEX_op_orc_i32:
+    case INDEX_op_xor_i32:
+    case INDEX_op_shl_i32:
+    case INDEX_op_shr_i32:
+    case INDEX_op_sar_i32:
+    case INDEX_op_setcond_i32:
+        return &r_rZ_rJ;
+
+    case INDEX_op_brcond_i32:
+        return &rZ_rJ;
+    case INDEX_op_movcond_i32:
+        return &movc_32;
+    case INDEX_op_add2_i32:
+    case INDEX_op_sub2_i32:
+        return &add2_32;
+    case INDEX_op_mulu2_i32:
+    case INDEX_op_muls2_i32:
+        return &r_r_rZ_rJ;
+
+    case INDEX_op_ld8u_i64:
+    case INDEX_op_ld8s_i64:
+    case INDEX_op_ld16u_i64:
+    case INDEX_op_ld16s_i64:
+    case INDEX_op_ld32u_i64:
+    case INDEX_op_ld32s_i64:
+    case INDEX_op_ld_i64:
+    case INDEX_op_ext_i32_i64:
+    case INDEX_op_extu_i32_i64:
+        return &R_r;
+
+    case INDEX_op_st8_i64:
+    case INDEX_op_st16_i64:
+    case INDEX_op_st32_i64:
+    case INDEX_op_st_i64:
+        return &RZ_r;
+
+    case INDEX_op_add_i64:
+    case INDEX_op_mul_i64:
+    case INDEX_op_div_i64:
+    case INDEX_op_divu_i64:
+    case INDEX_op_sub_i64:
+    case INDEX_op_and_i64:
+    case INDEX_op_andc_i64:
+    case INDEX_op_or_i64:
+    case INDEX_op_orc_i64:
+    case INDEX_op_xor_i64:
+    case INDEX_op_shl_i64:
+    case INDEX_op_shr_i64:
+    case INDEX_op_sar_i64:
+    case INDEX_op_setcond_i64:
+        return &R_RZ_RJ;
+
+    case INDEX_op_neg_i64:
+    case INDEX_op_not_i64:
+    case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
+        return &R_R;
+
+    case INDEX_op_extrl_i64_i32:
+    case INDEX_op_extrh_i64_i32:
+        return &r_R;
+
+    case INDEX_op_brcond_i64:
+        return &RZ_RJ;
+    case INDEX_op_movcond_i64:
+        return &movc_64;
+    case INDEX_op_add2_i64:
+    case INDEX_op_sub2_i64:
+        return &add2_64;
+    case INDEX_op_muluh_i64:
+        return &R_R_R;
+
+    case INDEX_op_qemu_ld_i32:
+        return &r_A;
+    case INDEX_op_qemu_ld_i64:
+        return &R_A;
+    case INDEX_op_qemu_st_i32:
+        return &sZ_A;
+    case INDEX_op_qemu_st_i64:
+        return &SZ_A;
+
+    default:
+        return NULL;
     }
-    return NULL;
 }
 
 static void tcg_target_init(TCGContext *s)
-- 
cgit v1.1
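
For context, a loose sketch of the consumer side of this hook; the
function name demo_process_op() is hypothetical, and the comments state
assumptions rather than guarantees about the common code of this era.

    /* Common code asks the backend for constraints once per opcode. */
    static void demo_process_op(TCGOpcode op)
    {
        const TCGTargetOpDef *tdefs = tcg_target_op_def(op);

        if (tdefs == NULL) {
            /* No constraint entry: as the hunks above show, this now
             * covers opcodes with no register arguments (exit_tb, br,
             * mb, ...) as well as opcodes the backend does not
             * implement at all.
             */
            return;
        }
        /* Otherwise tdefs->args_ct_str[i] is the constraint string of
         * operand i, outputs first, e.g. sparc's "rZ" meaning either
         * a register or the constant zero (%g0).
         */
    }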
From 89b2e37e6506d92b00ac478e7953be6ddd7a86a9 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Wed, 13 Sep 2017 21:32:38 -0700
Subject: tcg/mips: Fully convert tcg_target_op_def

Signed-off-by: Richard Henderson
---
 tcg/mips/tcg-target.inc.c | 324 ++++++++++++++++++++++++----------------------
 1 file changed, 170 insertions(+), 154 deletions(-)

diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
index 1c09ec7..ce40306 100644
--- a/tcg/mips/tcg-target.inc.c
+++ b/tcg/mips/tcg-target.inc.c
@@ -2163,166 +2163,182 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     }
 }
 
-static const TCGTargetOpDef mips_op_defs[] = {
-    { INDEX_op_exit_tb, { } },
-    { INDEX_op_goto_tb, { } },
-    { INDEX_op_br, { } },
-    { INDEX_op_goto_ptr, { "r" } },
-
-    { INDEX_op_ld8u_i32, { "r", "r" } },
-    { INDEX_op_ld8s_i32, { "r", "r" } },
-    { INDEX_op_ld16u_i32, { "r", "r" } },
-    { INDEX_op_ld16s_i32, { "r", "r" } },
-    { INDEX_op_ld_i32, { "r", "r" } },
-    { INDEX_op_st8_i32, { "rZ", "r" } },
-    { INDEX_op_st16_i32, { "rZ", "r" } },
-    { INDEX_op_st_i32, { "rZ", "r" } },
-
-    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
-    { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
-#if !use_mips32r6_instructions
-    { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } },
-    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } },
-#endif
-    { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } },
-    { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } },
-    { INDEX_op_div_i32, { "r", "rZ", "rZ" } },
-    { INDEX_op_divu_i32, { "r", "rZ", "rZ" } },
-    { INDEX_op_rem_i32, { "r", "rZ", "rZ" } },
-    { INDEX_op_remu_i32, { "r", "rZ", "rZ" } },
-    { INDEX_op_sub_i32, { "r", "rZ", "rN" } },
-
-    { INDEX_op_and_i32, { "r", "rZ", "rIK" } },
-    { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
-    { INDEX_op_not_i32, { "r", "rZ" } },
-    { INDEX_op_or_i32, { "r", "rZ", "rIZ" } },
-    { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } },
-
-    { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
-    { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
-    { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
-    { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
-    { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },
-    { INDEX_op_clz_i32, { "r", "r", "rWZ" } },
-
-    { INDEX_op_bswap16_i32, { "r", "r" } },
-    { INDEX_op_bswap32_i32, { "r", "r" } },
-
-    { INDEX_op_ext8s_i32, { "r", "rZ" } },
-    { INDEX_op_ext16s_i32, { "r", "rZ" } },
-
-    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
-    { INDEX_op_extract_i32, { "r", "r" } },
-
-    { INDEX_op_brcond_i32, { "rZ", "rZ" } },
-#if use_mips32r6_instructions
-    { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
-#else
-    { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
-#endif
-    { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
+    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
+    static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
+    static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } };
+    static const TCGTargetOpDef SZ_S = { .args_ct_str = { "SZ", "S" } };
+    static const TCGTargetOpDef rZ_rZ = { .args_ct_str = { "rZ", "rZ" } };
+    static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
+    static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
+    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
+    static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
+    static const TCGTargetOpDef r_r_rJ = { .args_ct_str = { "r", "r", "rJ" } };
+    static const TCGTargetOpDef SZ_S_S = { .args_ct_str = { "SZ", "S", "S" } };
+    static const TCGTargetOpDef SZ_SZ_S
+        = { .args_ct_str = { "SZ", "SZ", "S" } };
+    static const TCGTargetOpDef SZ_SZ_S_S
+        = { .args_ct_str = { "SZ", "SZ", "S", "S" } };
+    static const TCGTargetOpDef r_rZ_rN
+        = { .args_ct_str = { "r", "rZ", "rN" } };
+    static const TCGTargetOpDef r_rZ_rZ
+        = { .args_ct_str = { "r", "rZ", "rZ" } };
+    static const TCGTargetOpDef r_r_rIK
+        = { .args_ct_str = { "r", "r", "rIK" } };
+    static const TCGTargetOpDef r_r_rWZ
+        = { .args_ct_str = { "r", "r", "rWZ" } };
+    static const TCGTargetOpDef r_r_r_r
+        = { .args_ct_str = { "r", "r", "r", "r" } };
+    static const TCGTargetOpDef r_r_L_L
+        = { .args_ct_str = { "r", "r", "L", "L" } };
+    static const TCGTargetOpDef dep
+        = { .args_ct_str = { "r", "0", "rZ" } };
+    static const TCGTargetOpDef movc
+        = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "0" } };
+    static const TCGTargetOpDef movc_r6
+        = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
+    static const TCGTargetOpDef add2
+        = { .args_ct_str = { "r", "r", "rZ", "rZ", "rN", "rN" } };
+    static const TCGTargetOpDef br2
+        = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } };
+    static const TCGTargetOpDef setc2
+        = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
+
+    switch (op) {
+    case INDEX_op_goto_ptr:
+        return &r;
-#if TCG_TARGET_REG_BITS == 32
-    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
-    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
-    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
-    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } },
-#endif
+    case INDEX_op_ld8u_i32:
+    case INDEX_op_ld8s_i32:
+    case INDEX_op_ld16u_i32:
+    case INDEX_op_ld16s_i32:
+    case INDEX_op_ld_i32:
+    case INDEX_op_not_i32:
+    case INDEX_op_bswap16_i32:
+    case INDEX_op_bswap32_i32:
+    case INDEX_op_ext8s_i32:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_extract_i32:
+    case INDEX_op_ld8u_i64:
+    case INDEX_op_ld8s_i64:
+    case INDEX_op_ld16u_i64:
+    case INDEX_op_ld16s_i64:
+    case INDEX_op_ld32s_i64:
+    case INDEX_op_ld32u_i64:
+    case INDEX_op_ld_i64:
+    case INDEX_op_not_i64:
+    case INDEX_op_bswap16_i64:
+    case INDEX_op_bswap32_i64:
+    case INDEX_op_bswap64_i64:
+    case INDEX_op_ext8s_i64:
+    case INDEX_op_ext16s_i64:
+    case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
+    case INDEX_op_extu_i32_i64:
+    case INDEX_op_extrl_i64_i32:
+    case INDEX_op_extrh_i64_i32:
+    case INDEX_op_extract_i64:
+        return &r_r;
-#if TCG_TARGET_REG_BITS == 64
-    { INDEX_op_ld8u_i64, { "r", "r" } },
-    { INDEX_op_ld8s_i64, { "r", "r" } },
-    { INDEX_op_ld16u_i64, { "r", "r" } },
-    { INDEX_op_ld16s_i64, { "r", "r" } },
-    { INDEX_op_ld32s_i64, { "r", "r" } },
-    { INDEX_op_ld32u_i64, { "r", "r" } },
-    { INDEX_op_ld_i64, { "r", "r" } },
-    { INDEX_op_st8_i64, { "rZ", "r" } },
-    { INDEX_op_st16_i64, { "rZ", "r" } },
-    { INDEX_op_st32_i64, { "rZ", "r" } },
-    { INDEX_op_st_i64, { "rZ", "r" } },
-
-    { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
-    { INDEX_op_mul_i64, { "r", "rZ", "rZ" } },
-#if !use_mips32r6_instructions
-    { INDEX_op_muls2_i64, { "r", "r", "rZ", "rZ" } },
-    { INDEX_op_mulu2_i64, { "r", "r", "rZ", "rZ" } },
-#endif
-    { INDEX_op_mulsh_i64, { "r", "rZ", "rZ" } },
-    { INDEX_op_muluh_i64, { "r", "rZ", "rZ" } },
-    { INDEX_op_div_i64, { "r", "rZ", "rZ" } },
-    { INDEX_op_divu_i64, { "r", "rZ", "rZ" } },
-    { INDEX_op_rem_i64, { "r", "rZ", "rZ" } },
-    { INDEX_op_remu_i64, { "r", "rZ", "rZ" } },
-    { INDEX_op_sub_i64, { "r", "rZ", "rN" } },
-
-    { INDEX_op_and_i64, { "r", "rZ", "rIK" } },
-    { INDEX_op_nor_i64, { "r", "rZ", "rZ" } },
-    { INDEX_op_not_i64, { "r", "rZ" } },
-    { INDEX_op_or_i64, { "r", "rZ", "rI" } },
-    { INDEX_op_xor_i64, { "r", "rZ", "rI" } },
-
-    { INDEX_op_shl_i64, { "r", "rZ", "ri" } },
-    { INDEX_op_shr_i64, { "r", "rZ", "ri" } },
-    { INDEX_op_sar_i64, { "r", "rZ", "ri" } },
-    { INDEX_op_rotr_i64, { "r", "rZ", "ri" } },
-    { INDEX_op_rotl_i64, { "r", "rZ", "ri" } },
-    { INDEX_op_clz_i64, { "r", "r", "rWZ" } },
-
-    { INDEX_op_bswap16_i64, { "r", "r" } },
-    { INDEX_op_bswap32_i64, { "r", "r" } },
-    { INDEX_op_bswap64_i64, { "r", "r" } },
-
-    { INDEX_op_ext8s_i64, { "r", "rZ" } },
-    { INDEX_op_ext16s_i64, { "r", "rZ" } },
-    { INDEX_op_ext32s_i64, { "r", "rZ" } },
-    { INDEX_op_ext32u_i64, { "r", "rZ" } },
-    { INDEX_op_ext_i32_i64, { "r", "rZ" } },
-    { INDEX_op_extu_i32_i64, { "r", "rZ" } },
-    { INDEX_op_extrl_i64_i32, { "r", "rZ" } },
-    { INDEX_op_extrh_i64_i32, { "r", "rZ" } },
-
-    { INDEX_op_deposit_i64, { "r", "0", "rZ" } },
-    { INDEX_op_extract_i64, { "r", "r" } },
-
-    { INDEX_op_brcond_i64, { "rZ", "rZ" } },
-#if use_mips32r6_instructions
-    { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rZ", "rZ" } },
-#else
-    { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rZ", "0" } },
-#endif
-    { INDEX_op_setcond_i64, { "r", "rZ", "rZ" } },
-
-    { INDEX_op_qemu_ld_i32, { "r", "LZ" } },
-    { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
-    { INDEX_op_qemu_ld_i64, { "r", "LZ" } },
-    { INDEX_op_qemu_st_i64, { "SZ", "SZ" } },
-#elif TARGET_LONG_BITS == 32
-    { INDEX_op_qemu_ld_i32, { "r", "LZ" } },
-    { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
-    { INDEX_op_qemu_ld_i64, { "r", "r", "LZ" } },
-    { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } },
-#else
-    { INDEX_op_qemu_ld_i32, { "r", "LZ", "LZ" } },
-    { INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } },
-    { INDEX_op_qemu_ld_i64, { "r", "r", "LZ", "LZ" } },
-    { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } },
-#endif
+    case INDEX_op_st8_i32:
+    case INDEX_op_st16_i32:
+    case INDEX_op_st_i32:
+    case INDEX_op_st8_i64:
+    case INDEX_op_st16_i64:
+    case INDEX_op_st32_i64:
+    case INDEX_op_st_i64:
+        return &rZ_r;
-    { INDEX_op_mb, { } },
-    { -1 },
-};
+    case INDEX_op_add_i32:
+    case INDEX_op_add_i64:
+        return &r_r_rJ;
+    case INDEX_op_sub_i32:
+    case INDEX_op_sub_i64:
+        return &r_rZ_rN;
+    case INDEX_op_mul_i32:
+    case INDEX_op_mulsh_i32:
+    case INDEX_op_muluh_i32:
+    case INDEX_op_div_i32:
+    case INDEX_op_divu_i32:
+    case INDEX_op_rem_i32:
+    case INDEX_op_remu_i32:
+    case INDEX_op_nor_i32:
+    case INDEX_op_setcond_i32:
+    case INDEX_op_mul_i64:
+    case INDEX_op_mulsh_i64:
+    case INDEX_op_muluh_i64:
+    case INDEX_op_div_i64:
+    case INDEX_op_divu_i64:
+    case INDEX_op_rem_i64:
+    case INDEX_op_remu_i64:
+    case INDEX_op_nor_i64:
+    case INDEX_op_setcond_i64:
+        return &r_rZ_rZ;
+    case INDEX_op_muls2_i32:
+    case INDEX_op_mulu2_i32:
+    case INDEX_op_muls2_i64:
+    case INDEX_op_mulu2_i64:
+        return &r_r_r_r;
+    case INDEX_op_and_i32:
+    case INDEX_op_and_i64:
+        return &r_r_rIK;
+    case INDEX_op_or_i32:
+    case INDEX_op_xor_i32:
+    case INDEX_op_or_i64:
+    case INDEX_op_xor_i64:
+        return &r_r_rI;
+    case INDEX_op_shl_i32:
+    case INDEX_op_shr_i32:
+    case INDEX_op_sar_i32:
+    case INDEX_op_rotr_i32:
+    case INDEX_op_rotl_i32:
+    case INDEX_op_shl_i64:
+    case INDEX_op_shr_i64:
+    case INDEX_op_sar_i64:
+    case INDEX_op_rotr_i64:
+    case INDEX_op_rotl_i64:
+        return &r_r_ri;
+    case INDEX_op_clz_i32:
+    case INDEX_op_clz_i64:
+        return &r_r_rWZ;
-static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
-{
-    int i, n = ARRAY_SIZE(mips_op_defs);
+    case INDEX_op_deposit_i32:
+    case INDEX_op_deposit_i64:
+        return &dep;
+    case INDEX_op_brcond_i32:
+    case INDEX_op_brcond_i64:
+        return &rZ_rZ;
+    case INDEX_op_movcond_i32:
+    case INDEX_op_movcond_i64:
+        return use_mips32r6_instructions ? &movc_r6 : &movc;
-    for (i = 0; i < n; ++i) {
-        if (mips_op_defs[i].op == op) {
-            return &mips_op_defs[i];
-        }
+    case INDEX_op_add2_i32:
+    case INDEX_op_sub2_i32:
+        return &add2;
+    case INDEX_op_setcond2_i32:
+        return &setc2;
+    case INDEX_op_brcond2_i32:
+        return &br2;
+
+    case INDEX_op_qemu_ld_i32:
+        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
+                ? &r_L : &r_L_L);
+    case INDEX_op_qemu_st_i32:
+        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
+                ? &SZ_S : &SZ_S_S);
+    case INDEX_op_qemu_ld_i64:
+        return (TCG_TARGET_REG_BITS == 64 ? &r_L
+                : TARGET_LONG_BITS == 32 ? &r_r_L : &r_r_L_L);
+    case INDEX_op_qemu_st_i64:
+        return (TCG_TARGET_REG_BITS == 64 ? &SZ_S
+                : TARGET_LONG_BITS == 32 ? &SZ_SZ_S : &SZ_SZ_S_S);
+
+    default:
+        return NULL;
     }
-    return NULL;
 }
 
 static int tcg_target_callee_save_regs[] = {
-- 
cgit v1.1
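
One mips detail worth spelling out: the old table picked the movcond
constraints with #if use_mips32r6_instructions, whereas the new code
decides at the return statement.  Since use_mips32r6_instructions was
already usable in #if, it must expand to a preprocessor-visible
constant, so the C conditional folds away just as the #if did.  A
hypothetical wrapper isolating that selection (the rationale comments
are assumptions about the underlying instructions, not text from the
patch):

    /* movcond operands are ret, c1, c2, v1, v2. */
    static const TCGTargetOpDef *demo_movcond_def(void)
    {
        /* Pre-r6 MOVN/MOVZ only conditionally overwrite their
         * destination, so v2 must already sit in the output register;
         * the "0" constraint aliases it to operand 0.
         */
        static const TCGTargetOpDef movc
            = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "0" } };
        /* R6 SELNEZ/SELEQZ write the full destination, so v2 only
         * needs to be a register or zero.
         */
        static const TCGTargetOpDef movc_r6
            = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };

        return use_mips32r6_instructions ? &movc_r6 : &movc;
    }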