author    Richard Henderson <richard.henderson@linaro.org>  2023-04-25 12:06:48 +0100
committer Richard Henderson <richard.henderson@linaro.org>  2023-05-16 15:21:39 -0700
commit    933b331b306cd530a441d25245577f30ee0b938e (patch)
tree      995f63e0a546b7bfcb58ed030aa0314b062458f9 /tcg
parent    12d7fead7c6232b3ddae963b113dcaf7e1cb1f28 (diff)
tcg/riscv: Support softmmu unaligned accesses
The system is required to emulate unaligned accesses, even if the
hardware does not support it. The resulting trap may or may not be
more efficient than the qemu slow path. There are linux kernel patches
in flight to allow userspace to query hardware support; we can
re-evaluate whether to enable this by default after that.

In the meantime, softmmu now matches useronly, where we already
assumed that unaligned accesses are supported.

Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
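To make the patch's "check the last byte of the access" idea concrete, here is a
minimal C sketch of the comparison the backend now emits. The helper name
tlb_fast_path_hit and the 4 KiB page size are illustrative assumptions, not part
of the patch; the real code emits this check as RISC-V instructions against the
TLB comparator loaded from CPUTLBEntry.

    #include <stdbool.h>
    #include <stdint.h>

    #define TARGET_PAGE_BITS 12   /* assumption: 4 KiB target pages */
    #define TARGET_PAGE_MASK (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))

    /*
     * s_bits: log2 of the access size; a_bits: log2 of the guaranteed
     * alignment. tlb_comparator is the page-aligned address stored in
     * the TLB entry (addr_read or addr_write).
     */
    static bool tlb_fast_path_hit(uint64_t addr, uint64_t tlb_comparator,
                                  unsigned s_bits, unsigned a_bits)
    {
        uint64_t s_mask = ((uint64_t)1 << s_bits) - 1;
        uint64_t a_mask = ((uint64_t)1 << a_bits) - 1;
        uint64_t addr_adj = addr;

        /*
         * For under-aligned accesses, probe with the address of the last
         * byte instead, so an access that crosses a page boundary lands
         * in the next page and misses the fast path. Adding
         * s_mask - a_mask leaves the low a_bits of addr unchanged.
         */
        if (a_bits < s_bits) {
            addr_adj = addr + (s_mask - a_mask);
        }

        /*
         * Keep the page number plus the alignment bits; any set alignment
         * bit makes the compare fail, punting the access to the slow path.
         */
        return (addr_adj & (TARGET_PAGE_MASK | a_mask)) == tlb_comparator;
    }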
Diffstat (limited to 'tcg')
-rw-r--r--  tcg/riscv/tcg-target.c.inc | 48
1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index 19cd450..415e6c6 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -910,12 +910,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
#ifdef CONFIG_SOFTMMU
unsigned s_bits = opc & MO_SIZE;
+ unsigned s_mask = (1u << s_bits) - 1;
int mem_index = get_mmuidx(oi);
int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
- TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;
- tcg_target_long compare_mask;
+ int compare_mask;
+ TCGReg addr_adj;
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
@@ -924,44 +925,51 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
- /* Load the tlb comparator and the addend. */
- tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
- is_ld ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write));
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
- offsetof(CPUTLBEntry, addend));
-
- /* We don't support unaligned accesses. */
+ /*
+ * For aligned accesses, we check the first byte and include the alignment
+ * bits within the address. For unaligned access, we check that we don't
+ * cross pages using the address of the last byte of the access.
+ */
+ addr_adj = addr_reg;
if (a_bits < s_bits) {
- a_bits = s_bits;
+ addr_adj = TCG_REG_TMP0;
+ tcg_out_opc_imm(s, TARGET_LONG_BITS == 32 ? OPC_ADDIW : OPC_ADDI,
+ addr_adj, addr_reg, s_mask - a_mask);
}
- /* Clear the non-page, non-alignment bits from the address. */
- compare_mask = (tcg_target_long)TARGET_PAGE_MASK | a_mask;
+ compare_mask = TARGET_PAGE_MASK | a_mask;
if (compare_mask == sextreg(compare_mask, 0, 12)) {
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, compare_mask);
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
} else {
tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
- tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_reg);
+ tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
}
+ /* Load the tlb comparator and the addend. */
+ tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
+ is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write));
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
+ offsetof(CPUTLBEntry, addend));
+
/* Compare masked address with the TLB entry. */
ldst->label_ptr[0] = s->code_ptr;
tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
/* TLB Hit - translate address using addend. */
+ addr_adj = addr_reg;
if (TARGET_LONG_BITS == 32) {
- tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
- addr_reg = TCG_REG_TMP0;
+ addr_adj = TCG_REG_TMP0;
+ tcg_out_ext32u(s, addr_adj, addr_reg);
}
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr_reg);
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr_adj);
*pbase = TCG_REG_TMP0;
#else
if (a_mask) {
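As a companion to the hunk above, the TLB indexing computed by the emitted
SRLI/AND/ADD sequence can be summarized in C. This is a simplified sketch under
stated assumptions: CPU_TLB_ENTRY_BITS and the page size are assumed values,
the struct layouts are abbreviated, and the real CPUTLBEntry is a union with
per-access-type comparators.

    #include <stdint.h>

    #define TARGET_PAGE_BITS   12   /* assumption: 4 KiB target pages */
    #define CPU_TLB_ENTRY_BITS 5    /* assumption: log2(sizeof(CPUTLBEntry)) */

    typedef struct CPUTLBEntry {
        uint64_t addr_read;      /* comparator for loads */
        uint64_t addr_write;     /* comparator for stores */
        uint64_t addr_code;      /* comparator for fetches */
        uintptr_t addend;        /* host - guest address delta on a hit */
    } CPUTLBEntry;

    typedef struct CPUTLBDescFast {
        uint64_t mask;           /* (n_entries - 1) << CPU_TLB_ENTRY_BITS */
        uintptr_t table;         /* base address of the CPUTLBEntry array */
    } CPUTLBDescFast;

    /*
     * Mirrors the emitted SRLI/AND/ADD: shift the page number into a
     * byte offset, mask it to the table size, and add the table base.
     */
    static CPUTLBEntry *tlb_entry_for(CPUTLBDescFast *fast, uint64_t addr)
    {
        uint64_t ofs = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
                       & fast->mask;
        return (CPUTLBEntry *)(fast->table + ofs);
    }

On a hit (the BNE above is not taken), the backend adds addend to the
zero-extended guest address to obtain the host address in TCG_REG_TMP0.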