author    | Richard Henderson <richard.henderson@linaro.org> | 2023-10-01 17:13:00 +0000
committer | Richard Henderson <richard.henderson@linaro.org> | 2023-10-22 16:32:28 -0700
commit    | 2c53bdf110ef09422ceed89357a7dd8fc7d0b7a8 (patch)
tree      | 5151d71ce0eccde25443bb1ba1def1f825f5f2d7 /tcg
parent    | 23088ca0bcf491eaa3e99a2760cf85f4cd7a3bce (diff)
tcg/arm: Use tcg_use_softmmu
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
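
The conversion replaces the backend's build-time CONFIG_SOFTMMU conditionals with runtime tests of tcg_use_softmmu, so a single binary carries both the system-mode TLB path and the user-mode guest_base path. A minimal standalone sketch of that pattern (hypothetical helper names and flag, not the actual QEMU code):

    /* Standalone sketch only: illustrates the #ifdef -> runtime-flag pattern.
     * The helper name and the flag below are placeholders, not QEMU code. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool use_softmmu = true;   /* stand-in for QEMU's tcg_use_softmmu */

    static void emit_guest_address_setup(void)
    {
        /* Previously chosen at build time:
         *   #ifdef CONFIG_SOFTMMU
         *       ... emit TLB lookup ...
         *   #else
         *       ... emit guest_base addition ...
         *   #endif
         * Now both paths are compiled in and selected when code is generated. */
        if (use_softmmu) {
            puts("emit TLB lookup (system mode)");
        } else {
            puts("emit guest_base addition (user mode)");
        }
    }

    int main(void)
    {
        emit_guest_address_setup();   /* system-mode path */
        use_softmmu = false;
        emit_guest_address_setup();   /* user-mode path */
        return 0;
    }

The real patch applies this switch inside prepare_host_addr() and the prologue, as the diff below shows.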
Diffstat (limited to 'tcg')
-rw-r--r-- | tcg/arm/tcg-target.c.inc | 209
1 file changed, 100 insertions(+), 109 deletions(-)
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 0d9c2d1..fc78566 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -89,9 +89,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 #define TCG_REG_TMP TCG_REG_R12
 #define TCG_VEC_TMP TCG_REG_Q15

-#ifndef CONFIG_SOFTMMU
 #define TCG_REG_GUEST_BASE TCG_REG_R11
-#endif

 typedef enum {
     COND_EQ = 0x0,
@@ -356,14 +354,8 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
  * r0-r3 will be overwritten when reading the tlb entry (system-mode only);
  * r14 will be overwritten by the BLNE branching to the slow path.
  */
-#ifdef CONFIG_SOFTMMU
 #define ALL_QLDST_REGS \
-    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
-                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
-                          (1 << TCG_REG_R14)))
-#else
-#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R14))
-#endif
+    (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14)))

 /*
  * ARM immediates for ALU instructions are made of an unsigned 8-bit
@@ -1387,113 +1379,115 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     MemOp opc = get_memop(oi);
     unsigned a_mask;

-#ifdef CONFIG_SOFTMMU
-    *h = (HostAddress){
-        .cond = COND_AL,
-        .base = addrlo,
-        .index = TCG_REG_R1,
-        .index_scratch = true,
-    };
-#else
-    *h = (HostAddress){
-        .cond = COND_AL,
-        .base = addrlo,
-        .index = guest_base ? TCG_REG_GUEST_BASE : -1,
-        .index_scratch = false,
-    };
-#endif
+    if (tcg_use_softmmu) {
+        *h = (HostAddress){
+            .cond = COND_AL,
+            .base = addrlo,
+            .index = TCG_REG_R1,
+            .index_scratch = true,
+        };
+    } else {
+        *h = (HostAddress){
+            .cond = COND_AL,
+            .base = addrlo,
+            .index = guest_base ? TCG_REG_GUEST_BASE : -1,
+            .index_scratch = false,
+        };
+    }

     h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
     a_mask = (1 << h->aa.align) - 1;

-#ifdef CONFIG_SOFTMMU
-    int mem_index = get_mmuidx(oi);
-    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
-                        : offsetof(CPUTLBEntry, addr_write);
-    int fast_off = tlb_mask_table_ofs(s, mem_index);
-    unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
-    TCGReg t_addr;
-
-    ldst = new_ldst_label(s);
-    ldst->is_ld = is_ld;
-    ldst->oi = oi;
-    ldst->addrlo_reg = addrlo;
-    ldst->addrhi_reg = addrhi;
-
-    /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
-    QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
-    QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
-    tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
-
-    /* Extract the tlb index from the address into R0. */
-    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
-                    SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));
+    if (tcg_use_softmmu) {
+        int mem_index = get_mmuidx(oi);
+        int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
+                            : offsetof(CPUTLBEntry, addr_write);
+        int fast_off = tlb_mask_table_ofs(s, mem_index);
+        unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
+        TCGReg t_addr;

-    /*
-     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
-     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
-     */
-    QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
-    if (cmp_off == 0) {
-        if (s->addr_type == TCG_TYPE_I32) {
-            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
-        } else {
-            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
-        }
-    } else {
-        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
-                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
-        if (s->addr_type == TCG_TYPE_I32) {
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
+        ldst = new_ldst_label(s);
+        ldst->is_ld = is_ld;
+        ldst->oi = oi;
+        ldst->addrlo_reg = addrlo;
+        ldst->addrhi_reg = addrhi;
+
+        /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
+        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
+        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
+        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
+
+        /* Extract the tlb index from the address into R0. */
+        tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
+                        SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));
+
+        /*
+         * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
+         * Load the tlb comparator into R2/R3 and the fast path addend into R1.
+         */
+        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
+        if (cmp_off == 0) {
+            if (s->addr_type == TCG_TYPE_I32) {
+                tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2,
+                                 TCG_REG_R1, TCG_REG_R0);
+            } else {
+                tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2,
+                                 TCG_REG_R1, TCG_REG_R0);
+            }
         } else {
-            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
+            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
+                            TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
+            if (s->addr_type == TCG_TYPE_I32) {
+                tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
+            } else {
+                tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
+            }
         }
-    }

-    /* Load the tlb addend. */
-    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
-                    offsetof(CPUTLBEntry, addend));
+        /* Load the tlb addend. */
+        tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
+                        offsetof(CPUTLBEntry, addend));

-    /*
-     * Check alignment, check comparators.
-     * Do this in 2-4 insns. Use MOVW for v7, if possible,
-     * to reduce the number of sequential conditional instructions.
-     * Almost all guests have at least 4k pages, which means that we need
-     * to clear at least 9 bits even for an 8-byte memory, which means it
-     * isn't worth checking for an immediate operand for BIC.
-     *
-     * For unaligned accesses, test the page of the last unit of alignment.
-     * This leaves the least significant alignment bits unchanged, and of
-     * course must be zero.
-     */
-    t_addr = addrlo;
-    if (a_mask < s_mask) {
-        t_addr = TCG_REG_R0;
-        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
-                        addrlo, s_mask - a_mask);
-    }
-    if (use_armv7_instructions && s->page_bits <= 16) {
-        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
-        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
-                        t_addr, TCG_REG_TMP, 0);
-        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
-    } else {
-        if (a_mask) {
-            tcg_debug_assert(a_mask <= 0xff);
-            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
+        /*
+         * Check alignment, check comparators.
+         * Do this in 2-4 insns. Use MOVW for v7, if possible,
+         * to reduce the number of sequential conditional instructions.
+         * Almost all guests have at least 4k pages, which means that we need
+         * to clear at least 9 bits even for an 8-byte memory, which means it
+         * isn't worth checking for an immediate operand for BIC.
+         *
+         * For unaligned accesses, test the page of the last unit of alignment.
+         * This leaves the least significant alignment bits unchanged, and of
+         * course must be zero.
+         */
+        t_addr = addrlo;
+        if (a_mask < s_mask) {
+            t_addr = TCG_REG_R0;
+            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
+                            addrlo, s_mask - a_mask);
+        }
+        if (use_armv7_instructions && s->page_bits <= 16) {
+            tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
+            tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
+                            t_addr, TCG_REG_TMP, 0);
+            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
+                            TCG_REG_R2, TCG_REG_TMP, 0);
+        } else {
+            if (a_mask) {
+                tcg_debug_assert(a_mask <= 0xff);
+                tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
+            }
+            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
+                            SHIFT_IMM_LSR(s->page_bits));
+            tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
+                            0, TCG_REG_R2, TCG_REG_TMP,
+                            SHIFT_IMM_LSL(s->page_bits));
        }
-        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
-                        SHIFT_IMM_LSR(s->page_bits));
-        tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
-                        0, TCG_REG_R2, TCG_REG_TMP,
-                        SHIFT_IMM_LSL(s->page_bits));
-    }

-    if (s->addr_type != TCG_TYPE_I32) {
-        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
-    }
-#else
-    if (a_mask) {
+        if (s->addr_type != TCG_TYPE_I32) {
+            tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
+        }
+    } else if (a_mask) {
         ldst = new_ldst_label(s);
         ldst->is_ld = is_ld;
         ldst->oi = oi;
@@ -1505,7 +1499,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         /* tst addr, #mask */
         tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
     }
-#endif

     return ldst;
 }
@@ -2931,12 +2924,10 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

-#ifndef CONFIG_SOFTMMU
-    if (guest_base) {
+    if (!tcg_use_softmmu && guest_base) {
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
     }
-#endif

     tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
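
A note on the ALL_QLDST_REGS hunk above: assuming TCG_REG_R0..TCG_REG_R3 carry the register numbers 0..3 (as in this backend's TCGReg enum), the constant 0xf in the new expression is exactly the old r0-r3 reservation mask, so both former #ifdef arms collapse into one definition. A standalone sketch checking that equivalence (placeholder register numbers and a placeholder ALL_GENERAL_REGS, not the QEMU definitions):

    /* Standalone check only: verifies that the single tcg_use_softmmu
     * expression reserves the same registers as the two old #ifdef arms. */
    #include <assert.h>
    #include <stdbool.h>

    enum { R0, R1, R2, R3, R14 = 14 };   /* assumed Arm register numbers */
    #define ALL_GENERAL_REGS 0xffffu     /* placeholder "all registers" mask */

    static unsigned qldst_regs(bool use_softmmu)
    {
        /* New form from the patch: 0xf covers r0-r3 only when softmmu is used. */
        return ALL_GENERAL_REGS & ~((use_softmmu ? 0xf : 0) | (1u << R14));
    }

    int main(void)
    {
        /* Old CONFIG_SOFTMMU arm: r0-r3 and r14 reserved. */
        unsigned old_softmmu = ALL_GENERAL_REGS &
            ~((1u << R0) | (1u << R1) | (1u << R2) | (1u << R3) | (1u << R14));
        /* Old user-mode arm: only r14 reserved. */
        unsigned old_user = ALL_GENERAL_REGS & ~(1u << R14);

        assert(qldst_regs(true) == old_softmmu);
        assert(qldst_regs(false) == old_user);
        return 0;
    }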