author     Richard Henderson <richard.henderson@linaro.org>   2023-11-03 10:38:20 -0700
committer  Richard Henderson <richard.henderson@linaro.org>   2024-02-03 16:46:10 +1000
commit     98271007379bae8ca4300ae5d8cdc37ec662ee73 (patch)
tree       f842c686b060eade126fd048c9317ecf29a88752 /target
parent     45bf0e7aa648369cf8ab2333bd20144806fc1be3 (diff)
target/sparc: Use tcg_gen_qemu_{ld,st}_i128 for ASI_M_BCOPY
Align the operation to the 32-byte cacheline.
Use 2 pairs of i128 instead of 8 pairs of i32.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Acked-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Message-Id: <20231103173841.33651-2-richard.henderson@linaro.org>
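In guest terms the operation is a single 32-byte cacheline copy. The following is a minimal C sketch of the semantics the patch assumes, with both addresses truncated to a 32-byte boundary; guest_ram, guest_ld64() and guest_st64() are hypothetical stand-ins for QEMU's softmmu machinery, not real QEMU APIs:

    #include <stdint.h>
    #include <string.h>

    extern uint8_t guest_ram[];  /* hypothetical flat guest memory */

    static uint64_t guest_ld64(uint32_t addr)
    {
        uint64_t v;
        memcpy(&v, &guest_ram[addr], 8);  /* host-endian load */
        return v;
    }

    static void guest_st64(uint32_t addr, uint64_t v)
    {
        memcpy(&guest_ram[addr], &v, 8);  /* host-endian store */
    }

    /* ASI_M_BCOPY: copy one 32-byte line; both addresses truncate
     * to a 32-byte boundary.  Endianness cancels out because every
     * load is paired with an identically sized store. */
    static void asi_m_bcopy(uint32_t src, uint32_t dst)
    {
        src &= -32u;
        dst &= -32u;
        for (int i = 0; i < 32; i += 8) {
            guest_st64(dst + i, guest_ld64(src + i));
        }
    }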
Diffstat (limited to 'target')
-rw-r--r--  target/sparc/translate.c  |  45
1 file changed, 26 insertions(+), 19 deletions(-)
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 97184fa..1082aab 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -1727,28 +1727,35 @@ static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
     case GET_ASI_BCOPY:
         assert(TARGET_LONG_BITS == 32);
-        /* Copy 32 bytes from the address in SRC to ADDR. */
-        /* ??? The original qemu code suggests 4-byte alignment, dropping
-           the low bits, but the only place I can see this used is in the
-           Linux kernel with 32 byte alignment, which would make more sense
-           as a cacheline-style operation. */
+        /*
+         * Copy 32 bytes from the address in SRC to ADDR.
+         *
+         * From Ross RT625 hyperSPARC manual, section 4.6:
+         * "Block Copy and Block Fill will work only on cache line boundaries."
+         *
+         * It does not specify if an unaligned address is truncated or trapped.
+         * Previous qemu behaviour was to truncate to 4 byte alignment, which
+         * is obviously wrong. The only place I can see this used is in the
+         * Linux kernel which begins with page alignment, advancing by 32,
+         * so is always aligned. Assume truncation as the simpler option.
+         *
+         * Since the loads and stores are paired, allow the copy to happen
+         * in the host endianness. The copy need not be atomic.
+         */
         {
+            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
             TCGv saddr = tcg_temp_new();
             TCGv daddr = tcg_temp_new();
-            TCGv four = tcg_constant_tl(4);
-            TCGv_i32 tmp = tcg_temp_new_i32();
-            int i;
-
-            tcg_gen_andi_tl(saddr, src, -4);
-            tcg_gen_andi_tl(daddr, addr, -4);
-            for (i = 0; i < 32; i += 4) {
-                /* Since the loads and stores are paired, allow the
-                   copy to happen in the host endianness. */
-                tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
-                tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
-                tcg_gen_add_tl(saddr, saddr, four);
-                tcg_gen_add_tl(daddr, daddr, four);
-            }
+            TCGv_i128 tmp = tcg_temp_new_i128();
+
+            tcg_gen_andi_tl(saddr, src, -32);
+            tcg_gen_andi_tl(daddr, addr, -32);
+            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
+            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
+            tcg_gen_addi_tl(saddr, saddr, 16);
+            tcg_gen_addi_tl(daddr, daddr, 16);
+            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
+            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
         }
         break;
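A note on the MemOp choice: per QEMU's include/exec/memop.h, MO_ATOM_IFALIGN_PAIR allows each 16-byte access to be treated as a pair of half-sized operations that are single-copy atomic only when aligned, the cheapest option given that the copy need not be atomic. Two such accesses, with saddr/daddr advanced by 16 in between, cover the 32-byte line. A rough C sketch of that lowering on a 64-bit host (illustrative only; the real lowering is done by the TCG backend):

    #include <stdint.h>

    /* One aligned 16-byte access under MO_ATOM_IFALIGN_PAIR: two
     * aligned 8-byte halves, each a single access, with no atomicity
     * guaranteed across the pair. */
    static void copy16_as_pair(uint64_t *dst, const uint64_t *src)
    {
        uint64_t lo = src[0];
        uint64_t hi = src[1];
        dst[0] = lo;
        dst[1] = hi;
    }

    /* The bcopy above issues two of these per 32-byte line. */
    static void bcopy_line(uint64_t *dst, const uint64_t *src)
    {
        copy16_as_pair(dst, src);         /* first ld/st_i128 pair */
        copy16_as_pair(dst + 2, src + 2); /* second pair, after +16 */
    }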