author	Matthew Wahab <matthew.wahab@arm.com>	2015-09-22 09:35:17 +0000
committer	Matthew Wahab <mwahab@gcc.gnu.org>	2015-09-22 09:35:17 +0000
commit	641c2f8b69f799a00d0fda696d480e10505257c3 (patch)
tree	2220bcca76e385fc63c24283ae19a9119a3f587b /gcc/config/aarch64/atomics.md
parent	6380d2bc38237e00e3d460882b4b0938bbb068b9 (diff)
[AArch64] Use atomic load-operate instructions for fetch-update patterns.
gcc/
2015-09-22  Matthew Wahab  <matthew.wahab@arm.com>

	* config/aarch64/aarch64-protos.h
	(aarch64_atomic_ldop_supported_p): Declare.
	* config/aarch64/aarch64.c (aarch64_atomic_ldop_supported_p): New.
	(enum aarch64_atomic_load_op_code): New.
	(aarch64_emit_atomic_load_op): New.
	(aarch64_gen_atomic_ldop): Update to support load-operate patterns.
	* config/aarch64/atomics.md (atomic_<atomic_optab><mode>): Change
	to an expander.
	(aarch64_atomic_<atomic_optab><mode>): New.
	(aarch64_atomic_<atomic_optab><mode>_lse): New.
	(atomic_fetch_<atomic_optab><mode>): Change to an expander.
	(aarch64_atomic_fetch_<atomic_optab><mode>): New.
	(aarch64_atomic_fetch_<atomic_optab><mode>_lse): New.

gcc/testsuite/
2015-09-22  Matthew Wahab  <matthew.wahab@arm.com>

	* gcc.target/aarch64/atomic-inst-ldadd.c: New.
	* gcc.target/aarch64/atomic-inst-ldlogic.c: New.

From-SVN: r228001
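For illustration only: the kind of C-level fetch-update this patch affects,
modeled loosely on the new gcc.target/aarch64/atomic-inst-ldadd.c test (the
test contents are not shown on this page, so the code below is an
assumption). Built with -march=armv8-a+lse, the call can now expand to a
single LDADD instead of a load-exclusive/store-exclusive retry loop.

  /* Hypothetical example in the spirit of atomic-inst-ldadd.c; compile
     with -march=armv8-a+lse so that TARGET_LSE is enabled.  */
  #include <stdint.h>

  uint64_t
  fetch_add (uint64_t *ptr, uint64_t val)
  {
    /* Expands through atomic_fetch_add<mode>; with LSE this becomes a
       single LDADD rather than an LDXR/STXR loop.  */
    return __atomic_fetch_add (ptr, val, __ATOMIC_RELAXED);
  }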
Diffstat (limited to 'gcc/config/aarch64/atomics.md')
-rw-r--r--	gcc/config/aarch64/atomics.md	101
1 file changed, 92 insertions(+), 9 deletions(-)
diff --git a/gcc/config/aarch64/atomics.md b/gcc/config/aarch64/atomics.md
index 11a9d13..e0d8856 100644
--- a/gcc/config/aarch64/atomics.md
+++ b/gcc/config/aarch64/atomics.md
@@ -225,23 +225,63 @@
}
)
-(define_insn_and_split "atomic_<atomic_optab><mode>"
+(define_expand "atomic_<atomic_optab><mode>"
+ [(match_operand:ALLI 0 "aarch64_sync_memory_operand" "")
+ (atomic_op:ALLI
+ (match_operand:ALLI 1 "<atomic_op_operand>" "")
+ (match_operand:SI 2 "const_int_operand"))]
+ ""
+ {
+ rtx (*gen) (rtx, rtx, rtx);
+
+ /* Use an atomic load-operate instruction when possible. */
+ if (aarch64_atomic_ldop_supported_p (<CODE>))
+ gen = gen_aarch64_atomic_<atomic_optab><mode>_lse;
+ else
+ gen = gen_aarch64_atomic_<atomic_optab><mode>;
+
+ emit_insn (gen (operands[0], operands[1], operands[2]));
+
+ DONE;
+ }
+)
+
+(define_insn_and_split "aarch64_atomic_<atomic_optab><mode>"
+ [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
+ (unspec_volatile:ALLI
+ [(atomic_op:ALLI (match_dup 0)
+ (match_operand:ALLI 1 "<atomic_op_operand>" "r<const_atomic>"))
+ (match_operand:SI 2 "const_int_operand")]
+ UNSPECV_ATOMIC_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:ALLI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
+ operands[1], operands[2], operands[4]);
+ DONE;
+ }
+)
+
+(define_insn_and_split "aarch64_atomic_<atomic_optab><mode>_lse"
[(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
(unspec_volatile:ALLI
[(atomic_op:ALLI (match_dup 0)
(match_operand:ALLI 1 "<atomic_op_operand>" "r<const_atomic>"))
- (match_operand:SI 2 "const_int_operand")] ;; model
+ (match_operand:SI 2 "const_int_operand")]
UNSPECV_ATOMIC_OP))
- (clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:ALLI 3 "=&r"))
- (clobber (match_scratch:SI 4 "=&r"))]
- ""
+ (clobber (match_scratch:ALLI 3 "=&r"))]
+ "TARGET_LSE"
"#"
"&& reload_completed"
[(const_int 0)]
{
- aarch64_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
- operands[1], operands[2], operands[4]);
+ aarch64_gen_atomic_ldop (<CODE>, operands[3], operands[0],
+ operands[1], operands[2]);
DONE;
}
)
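The expanders in this patch dispatch on aarch64_atomic_ldop_supported_p,
which the patch declares in aarch64-protos.h and defines in aarch64.c
(neither file is shown in this diff). A plausible sketch of its shape,
assuming it simply gates on TARGET_LSE and the operation code:

  /* Sketch only: the real definition lives in gcc/config/aarch64/aarch64.c,
     and the exact set of accepted codes is an assumption here.  */
  bool
  aarch64_atomic_ldop_supported_p (enum rtx_code code)
  {
    if (!TARGET_LSE)
      return false;

    switch (code)
      {
      case SET:
      case AND:
      case IOR:
      case XOR:
      case MINUS:
      case PLUS:
        return true;
      default:
        return false;
      }
  }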
@@ -268,7 +308,30 @@
}
)
-(define_insn_and_split "atomic_fetch_<atomic_optab><mode>"
+;; Load-operate-store, returning the updated memory data.
+
+(define_expand "atomic_fetch_<atomic_optab><mode>"
+ [(match_operand:ALLI 0 "register_operand" "")
+ (match_operand:ALLI 1 "aarch64_sync_memory_operand" "")
+ (atomic_op:ALLI
+ (match_operand:ALLI 2 "<atomic_op_operand>" "")
+ (match_operand:SI 3 "const_int_operand"))]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+
+ /* Use an atomic load-operate instruction when possible. */
+ if (aarch64_atomic_ldop_supported_p (<CODE>))
+ gen = gen_aarch64_atomic_fetch_<atomic_optab><mode>_lse;
+ else
+ gen = gen_aarch64_atomic_fetch_<atomic_optab><mode>;
+
+ emit_insn (gen (operands[0], operands[1], operands[2], operands[3]));
+
+ DONE;
+})
+
+(define_insn_and_split "aarch64_atomic_fetch_<atomic_optab><mode>"
[(set (match_operand:ALLI 0 "register_operand" "=&r")
(match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
(set (match_dup 1)
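The atomic_fetch_<atomic_optab><mode> expander above makes the same
LSE-or-fallback choice for fetch-updates that return the original memory
value. An illustrative caller (flags and instruction mapping are
assumptions; the logical-operation tests live in the new
atomic-inst-ldlogic.c):

  /* Illustrative only: with -march=armv8-a+lse the IOR case can map to an
     LSE load-operate (LDSET) via aarch64_gen_atomic_ldop.  */
  #include <stdint.h>

  uint32_t
  fetch_or (uint32_t *ptr, uint32_t mask)
  {
    return __atomic_fetch_or (ptr, mask, __ATOMIC_ACQUIRE);
  }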
@@ -291,6 +354,26 @@
}
)
+(define_insn_and_split "aarch64_atomic_fetch_<atomic_optab><mode>_lse"
+ [(set (match_operand:ALLI 0 "register_operand" "=&r")
+ (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
+ (set (match_dup 1)
+ (unspec_volatile:ALLI
+ [(atomic_op:ALLI (match_dup 1)
+ (match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>"))
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPECV_ATOMIC_LDOP))]
+ "TARGET_LSE"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_gen_atomic_ldop (<CODE>, operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+ }
+)
+
(define_insn_and_split "atomic_fetch_nand<mode>"
[(set (match_operand:ALLI 0 "register_operand" "=&r")
(match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
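When the caller discards the fetched value, expansion instead enters the
new atomic_<atomic_optab><mode> expander at the top of this patch. A hedged
sketch of such a caller (names are illustrative):

  /* Result unused: expands through atomic_or<mode> rather than the fetch
     pattern, still a single LSE load-operate when supported.  */
  #include <stdint.h>

  void
  set_flag (uint32_t *ptr, uint32_t mask)
  {
    (void) __atomic_or_fetch (ptr, mask, __ATOMIC_RELEASE);
  }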