author	Patrick O'Neill <patrick@rivosinc.com>	2023-10-31 13:18:53 -0700
committer	Patrick O'Neill <patrick@rivosinc.com>	2023-11-01 15:06:04 -0700
commit	274c904471a11c493353a8b4f6e50d8fb7fef906 (patch)
tree	38935829ff29b0e8593a7c28cf159d48e9fda5d9
parent	ea2e7bf80b8deead064d9b54c3caa852dfe009b3 (diff)
RISC-V: Use riscv_subword_address for atomic_test_and_set
Other subword atomic patterns use riscv_subword_address to calculate
the aligned address, shift amount, mask and !mask.  atomic_test_and_set
was implemented before the common function was added.  After this patch
all subword atomic patterns use riscv_subword_address.

gcc/ChangeLog:

	* config/riscv/sync.md: Use riscv_subword_address function to
	  calculate the address and shift in atomic_test_and_set.

Signed-off-by: Patrick O'Neill <patrick@rivosinc.com>
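For reference, here is a minimal C-level sketch of the arithmetic the subword helpers derive from a byte address: the word-aligned address, the bit shift of the byte within the word, the mask and the inverted mask. This is illustrative only; riscv_subword_address itself operates on RTL operands inside the expander, and the struct and function names below are hypothetical.

    #include <stdint.h>

    /* Illustrative sketch (hypothetical names): the values the subword
       atomic patterns compute from a byte address, assuming a 32-bit
       word container and little-endian byte numbering.  */
    struct subword_info
    {
      uintptr_t aligned_addr;  /* byte address rounded down to a word */
      unsigned shift;          /* bit offset of the byte within the word */
      uint32_t mask;           /* selects the target byte */
      uint32_t not_mask;       /* selects every other byte */
    };

    static struct subword_info
    subword_address (const void *p)
    {
      struct subword_info info;
      uintptr_t addr = (uintptr_t) p;

      info.aligned_addr = addr & ~(uintptr_t) 3;  /* word-align the address */
      info.shift = (unsigned) (addr & 3) * 8;     /* byte offset in bits */
      info.mask = (uint32_t) 0xff << info.shift;
      info.not_mask = ~info.mask;
      return info;
    }

atomic_test_and_set only needs the aligned address and the shift, which is why the mask and inverted mask returned by riscv_subword_address are requested but left unused in the expander below.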
-rw-r--r--	gcc/config/riscv/sync.md	41
 1 file changed, 17 insertions(+), 24 deletions(-)
diff --git a/gcc/config/riscv/sync.md b/gcc/config/riscv/sync.md
index ec9d4b4..f05cccf 100644
--- a/gcc/config/riscv/sync.md
+++ b/gcc/config/riscv/sync.md
@@ -504,43 +504,36 @@
(set (attr "length") (const_int 28))])
(define_expand "atomic_test_and_set"
- [(match_operand:QI 0 "register_operand" "") ;; bool output
+ [(match_operand:QI 0 "register_operand" "") ;; bool output
(match_operand:QI 1 "memory_operand" "+A") ;; memory
- (match_operand:SI 2 "const_int_operand" "")] ;; model
+ (match_operand:SI 2 "const_int_operand" "")] ;; model
"TARGET_ATOMIC"
{
/* We have no QImode atomics, so use the address LSBs to form a mask,
then use an aligned SImode atomic. */
- rtx result = operands[0];
+ rtx old = gen_reg_rtx (SImode);
rtx mem = operands[1];
rtx model = operands[2];
- rtx addr = force_reg (Pmode, XEXP (mem, 0));
-
- rtx aligned_addr = gen_reg_rtx (Pmode);
- emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));
+ rtx set = gen_reg_rtx (QImode);
+ rtx aligned_mem = gen_reg_rtx (SImode);
+ rtx shift = gen_reg_rtx (SImode);
- rtx aligned_mem = change_address (mem, SImode, aligned_addr);
- set_mem_alias_set (aligned_mem, 0);
+ /* Unused. */
+ rtx _mask = gen_reg_rtx (SImode);
+ rtx _not_mask = gen_reg_rtx (SImode);
- rtx offset = gen_reg_rtx (SImode);
- emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
-                                      GEN_INT (3)));
+ riscv_subword_address (mem, &aligned_mem, &shift, &_mask, &_not_mask);
- rtx tmp = gen_reg_rtx (SImode);
- emit_move_insn (tmp, GEN_INT (1));
+ emit_move_insn (set, GEN_INT (1));
- rtx shmt = gen_reg_rtx (SImode);
- emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));
+ rtx shifted_set = gen_reg_rtx (SImode);
+ riscv_lshift_subword (QImode, set, shift, &shifted_set);
- rtx word = gen_reg_rtx (SImode);
- emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp,
-                                       gen_lowpart (QImode, shmt)));
+ emit_insn (gen_atomic_fetch_orsi (old, aligned_mem, shifted_set, model));
- tmp = gen_reg_rtx (SImode);
- emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));
+ emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
+                                        gen_lowpart (QImode, shift)));
- emit_move_insn (gen_lowpart (SImode, result),
-                 gen_rtx_LSHIFTRT (SImode, tmp,
-                                   gen_lowpart (QImode, shmt)));
+ emit_move_insn (operands[0], gen_lowpart (QImode, old));
DONE;
})
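As a rough C analogue of the sequence the expander now emits (illustrative only; the real code passes the memory model operand through unchanged, whereas this sketch hard-codes __ATOMIC_SEQ_CST):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative sketch: byte-sized test-and-set built from an aligned
       word-sized fetch-or, mirroring the expander's use of
       atomic_fetch_orsi followed by a shift back down to the byte.  */
    static bool
    byte_test_and_set (uint8_t *p)
    {
      uintptr_t addr = (uintptr_t) p;
      uint32_t *aligned = (uint32_t *) (addr & ~(uintptr_t) 3);
      unsigned shift = (unsigned) (addr & 3) * 8;

      uint32_t set = (uint32_t) 1 << shift;   /* cf. riscv_lshift_subword */
      uint32_t old = __atomic_fetch_or (aligned, set, __ATOMIC_SEQ_CST);

      return (uint8_t) (old >> shift) != 0;   /* old value of the target byte */
    }

The cast through an aligned word pointer is acceptable at the machine level the expander targets, though it would not be strictly conforming C; the point is only to show the shape of the fetch-or plus shift-and-truncate sequence.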