| author | Richard Henderson <richard.henderson@linaro.org> | 2023-10-01 12:35:44 -0700 |
|---|---|---|
| committer | Richard Henderson <richard.henderson@linaro.org> | 2023-10-22 16:32:28 -0700 |
| commit | 4944d359107fa3e88200697daef28e3b5878daa6 | |
| tree | da2213a8b2d94b96ebba2af829a745927a03b1b4 /tcg | |
| parent | cf0ed30eb10c7341fd1f253446e3bbb6e0114c30 | |
tcg/riscv: Use tcg_use_softmmu
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
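
The change replaces the compile-time `#ifdef CONFIG_SOFTMMU` / `#else` split in `prepare_host_addr()` and in the prologue with an ordinary `if` on `tcg_use_softmmu`. A minimal, hypothetical sketch of that pattern follows; `use_softmmu` and `emit_guest_access` are illustrative names, not QEMU identifiers:

```c
/*
 * Hypothetical sketch of the #ifdef-to-runtime-flag conversion this
 * patch applies; use_softmmu and emit_guest_access are illustrative
 * names, not QEMU code.
 */
#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_SOFTMMU
/* Constant in softmmu builds: the untaken branch still folds away. */
#define use_softmmu true
#else
static const bool use_softmmu = false;
#endif

static void emit_guest_access(void)
{
    if (use_softmmu) {
        /* Was: #ifdef CONFIG_SOFTMMU -- emit the TLB lookup fast path. */
        puts("softmmu: TLB lookup");
    } else {
        /* Was: #else -- emit direct guest_base addressing. */
        puts("user-only: guest_base addressing");
    }
}

int main(void)
{
    emit_guest_access();
    return 0;
}
```

When the flag is a compile-time constant, the compiler still eliminates the untaken branch, but both arms are now parsed and type-checked in every configuration, which is the usual benefit of this kind of conversion.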
Diffstat (limited to 'tcg')
-rw-r--r-- | tcg/riscv/tcg-target.c.inc | 177 |
1 file changed, 90 insertions(+), 87 deletions(-)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index dc71f82..34e10e7 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -1245,105 +1245,110 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
     aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
     a_mask = (1u << aa.align) - 1;
 
-#ifdef CONFIG_SOFTMMU
-    unsigned s_bits = opc & MO_SIZE;
-    unsigned s_mask = (1u << s_bits) - 1;
-    int mem_index = get_mmuidx(oi);
-    int fast_ofs = tlb_mask_table_ofs(s, mem_index);
-    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
-    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
-    int compare_mask;
-    TCGReg addr_adj;
-
-    ldst = new_ldst_label(s);
-    ldst->is_ld = is_ld;
-    ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
-
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
-
-    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
-                    s->page_bits - CPU_TLB_ENTRY_BITS);
-    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
-    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
+    if (tcg_use_softmmu) {
+        unsigned s_bits = opc & MO_SIZE;
+        unsigned s_mask = (1u << s_bits) - 1;
+        int mem_index = get_mmuidx(oi);
+        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
+        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
+        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
+        int compare_mask;
+        TCGReg addr_adj;
 
-    /*
-     * For aligned accesses, we check the first byte and include the alignment
-     * bits within the address.  For unaligned access, we check that we don't
-     * cross pages using the address of the last byte of the access.
-     */
-    addr_adj = addr_reg;
-    if (a_mask < s_mask) {
-        addr_adj = TCG_REG_TMP0;
-        tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
-                        addr_adj, addr_reg, s_mask - a_mask);
-    }
-    compare_mask = s->page_mask | a_mask;
-    if (compare_mask == sextreg(compare_mask, 0, 12)) {
-        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
-    } else {
-        tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
-        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
-    }
-
-    /* Load the tlb comparator and the addend.  */
-    QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
-    tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
-               is_ld ? offsetof(CPUTLBEntry, addr_read)
-                     : offsetof(CPUTLBEntry, addr_write));
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
-               offsetof(CPUTLBEntry, addend));
-
-    /* Compare masked address with the TLB entry. */
-    ldst->label_ptr[0] = s->code_ptr;
-    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
-
-    /* TLB Hit - translate address using addend.  */
-    if (addr_type != TCG_TYPE_I32) {
-        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
-    } else if (have_zba) {
-        tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
-    } else {
-        tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
-        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, TCG_REG_TMP2);
-    }
-    *pbase = TCG_REG_TMP0;
-#else
-    TCGReg base;
-
-    if (a_mask) {
         ldst = new_ldst_label(s);
         ldst->is_ld = is_ld;
         ldst->oi = oi;
         ldst->addrlo_reg = addr_reg;
 
-        /* We are expecting alignment max 7, so we can always use andi. */
-        tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
-        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
+
+        tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
+                        s->page_bits - CPU_TLB_ENTRY_BITS);
+        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
+        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
+
+        /*
+         * For aligned accesses, we check the first byte and include the
+         * alignment bits within the address.  For unaligned access, we
+         * check that we don't cross pages using the address of the last
+         * byte of the access.
+         */
+        addr_adj = addr_reg;
+        if (a_mask < s_mask) {
+            addr_adj = TCG_REG_TMP0;
+            tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
+                            addr_adj, addr_reg, s_mask - a_mask);
+        }
+        compare_mask = s->page_mask | a_mask;
+        if (compare_mask == sextreg(compare_mask, 0, 12)) {
+            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
+        } else {
+            tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
+            tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
+        }
+
+        /* Load the tlb comparator and the addend.  */
+        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
+        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
+                   is_ld ? offsetof(CPUTLBEntry, addr_read)
+                         : offsetof(CPUTLBEntry, addr_write));
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
+                   offsetof(CPUTLBEntry, addend));
 
+        /* Compare masked address with the TLB entry. */
         ldst->label_ptr[0] = s->code_ptr;
-        tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
-    }
+        tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
 
-    if (guest_base != 0) {
-        base = TCG_REG_TMP0;
+        /* TLB Hit - translate address using addend.  */
         if (addr_type != TCG_TYPE_I32) {
-            tcg_out_opc_reg(s, OPC_ADD, base, addr_reg, TCG_GUEST_BASE_REG);
+            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
         } else if (have_zba) {
-            tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg, TCG_GUEST_BASE_REG);
+            tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0,
+                            addr_reg, TCG_REG_TMP2);
         } else {
-            tcg_out_ext32u(s, base, addr_reg);
-            tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
+            tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
+            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0,
+                            TCG_REG_TMP0, TCG_REG_TMP2);
         }
-    } else if (addr_type != TCG_TYPE_I32) {
-        base = addr_reg;
+        *pbase = TCG_REG_TMP0;
     } else {
-        base = TCG_REG_TMP0;
-        tcg_out_ext32u(s, base, addr_reg);
+        TCGReg base;
+
+        if (a_mask) {
+            ldst = new_ldst_label(s);
+            ldst->is_ld = is_ld;
+            ldst->oi = oi;
+            ldst->addrlo_reg = addr_reg;
+
+            /* We are expecting alignment max 7, so we can always use andi. */
+            tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
+            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
+
+            ldst->label_ptr[0] = s->code_ptr;
+            tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
+        }
+
+        if (guest_base != 0) {
+            base = TCG_REG_TMP0;
+            if (addr_type != TCG_TYPE_I32) {
+                tcg_out_opc_reg(s, OPC_ADD, base, addr_reg,
+                                TCG_GUEST_BASE_REG);
+            } else if (have_zba) {
+                tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg,
+                                TCG_GUEST_BASE_REG);
+            } else {
+                tcg_out_ext32u(s, base, addr_reg);
+                tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
+            }
+        } else if (addr_type != TCG_TYPE_I32) {
+            base = addr_reg;
+        } else {
+            base = TCG_REG_TMP0;
+            tcg_out_ext32u(s, base, addr_reg);
+        }
+        *pbase = base;
     }
-    *pbase = base;
-#endif
 
     return ldst;
 }
@@ -2075,12 +2080,10 @@ static void tcg_target_qemu_prologue(TCGContext *s)
                    TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
     }
 
-#if !defined(CONFIG_SOFTMMU)
-    if (guest_base) {
+    if (!tcg_use_softmmu && guest_base) {
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }
-#endif
 
     /* Call generated code */
     tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
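
One detail of the fast path kept by this patch deserves a worked example: an unaligned access is checked against the address of its last byte (`addr_adj = addr + (s_mask - a_mask)`), so an access that crosses a page boundary fails the TLB compare and falls through to the slow path. Below is a self-contained sketch of that mask arithmetic; the concrete values (an 8-byte unaligned load on 4 KiB pages) are assumed examples, not taken from the patch:

```c
/* Stand-alone illustration of the page-crossing check used in the
 * softmmu fast path above; all concrete values are assumed examples. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned s_bits = 3;                     /* MO_64: 8-byte access */
    unsigned s_mask = (1u << s_bits) - 1;    /* 7 */
    unsigned a_mask = 0;                     /* no alignment requirement */
    int64_t page_mask = -((int64_t)1 << 12); /* 4 KiB pages */

    uint64_t addr = 0x1ffa;                  /* spans 0x1ffa..0x2001 */
    uint64_t addr_adj = addr + (s_mask - a_mask);        /* last byte */
    uint64_t compare = addr_adj & (uint64_t)(page_mask | a_mask);

    /* The TLB comparator for addr's page holds 0x1000, so the masked
     * value 0x2000 mismatches and the slow path handles the access. */
    printf("page(addr) = %#" PRIx64 ", page(last byte) = %#" PRIx64 "\n",
           addr & (uint64_t)page_mask, compare);
    return 0;
}
```

For aligned accesses the alignment bits are instead folded into `compare_mask`, as the code's own comment notes: an access that satisfies an alignment of at least its size can never straddle a page, so checking the first byte suffices.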