author    Richard Henderson <richard.henderson@linaro.org>    2019-09-19 14:36:29 +0000
committer Richard Henderson <rth@gcc.gnu.org>                 2019-09-19 07:36:29 -0700
commit    4a2095ebace8534038ce2adf4ae94bfc854066c4 (patch)
tree      982fd22424e7f0412c5d5081af3ef4498c98fc9c /gcc/config/aarch64
parent    e3f15286d1129de2cceee6acd5d5584cb5422db6 (diff)
aarch64: Implement TImode compare-and-swap
This pattern will only be used with the __sync functions, because we do not
yet have a bare TImode atomic load.

        * config/aarch64/aarch64.c (aarch64_gen_compare_reg): Add support
        for NE comparison of TImode values.
        (aarch64_emit_load_exclusive): Add support for TImode.
        (aarch64_emit_store_exclusive): Likewise.
        (aarch64_split_compare_and_swap): Disable strong_zero_p for TImode.
        * config/aarch64/atomics.md (@atomic_compare_and_swap<ALLI_TI>):
        Change iterator from ALLI to ALLI_TI.
        (@atomic_compare_and_swap<JUST_TI>): New.
        (@atomic_compare_and_swap<JUST_TI>_lse): New.
        (aarch64_load_exclusive_pair): New.
        (aarch64_store_exclusive_pair): New.
        * config/aarch64/iterators.md (JUST_TI): New.

From-SVN: r275965
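
As a usage illustration (not part of the patch): after this change a 16-byte
__sync compare-and-swap can be expanded inline through the new TImode pattern.
A minimal sketch, assuming unsigned __int128 is available on the target:

#include <stdbool.h>

static unsigned __int128 shared;

/* __sync builtins are full-barrier, so the expansion uses a sequentially
   consistent model: without LSE this should become an LDAXP/STLXP loop,
   with LSE a single CASPAL.  Exact output depends on options and
   compiler version.  */
bool
try_update (unsigned __int128 expected, unsigned __int128 desired)
{
  return __sync_bool_compare_and_swap (&shared, expected, desired);
}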
Diffstat (limited to 'gcc/config/aarch64')
-rw-r--r--  gcc/config/aarch64/aarch64.c     48
-rw-r--r--  gcc/config/aarch64/atomics.md    93
-rw-r--r--  gcc/config/aarch64/iterators.md   3
3 files changed, 131 insertions(+), 13 deletions(-)
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 99d51e2..a5c4f55627 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -2039,10 +2039,33 @@ emit_set_insn (rtx x, rtx y)
rtx
aarch64_gen_compare_reg (RTX_CODE code, rtx x, rtx y)
{
- machine_mode mode = SELECT_CC_MODE (code, x, y);
- rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
+ machine_mode cmp_mode = GET_MODE (x);
+ machine_mode cc_mode;
+ rtx cc_reg;
- emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
+ if (cmp_mode == TImode)
+ {
+ gcc_assert (code == NE);
+
+ cc_mode = CCmode;
+ cc_reg = gen_rtx_REG (cc_mode, CC_REGNUM);
+
+ rtx x_lo = operand_subword (x, 0, 0, TImode);
+ rtx y_lo = operand_subword (y, 0, 0, TImode);
+ emit_set_insn (cc_reg, gen_rtx_COMPARE (cc_mode, x_lo, y_lo));
+
+ rtx x_hi = operand_subword (x, 1, 0, TImode);
+ rtx y_hi = operand_subword (y, 1, 0, TImode);
+ emit_insn (gen_ccmpdi (cc_reg, cc_reg, x_hi, y_hi,
+ gen_rtx_EQ (cc_mode, cc_reg, const0_rtx),
+ GEN_INT (AARCH64_EQ)));
+ }
+ else
+ {
+ cc_mode = SELECT_CC_MODE (code, x, y);
+ cc_reg = gen_rtx_REG (cc_mode, CC_REGNUM);
+ emit_set_insn (cc_reg, gen_rtx_COMPARE (cc_mode, x, y));
+ }
return cc_reg;
}
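
The TImode case above decomposes a 128-bit inequality into two 64-bit tests:
CMP compares the low halves, and CCMP folds in the high halves, which only
need to be compared when the low halves were equal.  A minimal C sketch of the
same decomposition (the generated code of course works on the condition flags
rather than on booleans):

#include <stdint.h>
#include <stdbool.h>

/* x != y for a 128-bit value split into halves: the first test mirrors the
   CMP of x_lo/y_lo, the second the CCMP of x_hi/y_hi.  */
static bool
ne_ti (uint64_t x_lo, uint64_t x_hi, uint64_t y_lo, uint64_t y_hi)
{
  return x_lo != y_lo || x_hi != y_hi;
}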
@@ -2593,7 +2616,6 @@ aarch64_zero_extend_const_eq (machine_mode xmode, rtx x,
gcc_assert (r != NULL);
return rtx_equal_p (x, r);
}
-
/* Return TARGET if it is nonnull and a register of mode MODE.
Otherwise, return a fresh register of mode MODE if we can,
@@ -16814,16 +16836,26 @@ static void
aarch64_emit_load_exclusive (machine_mode mode, rtx rval,
rtx mem, rtx model_rtx)
{
- emit_insn (gen_aarch64_load_exclusive (mode, rval, mem, model_rtx));
+ if (mode == TImode)
+ emit_insn (gen_aarch64_load_exclusive_pair (gen_lowpart (DImode, rval),
+ gen_highpart (DImode, rval),
+ mem, model_rtx));
+ else
+ emit_insn (gen_aarch64_load_exclusive (mode, rval, mem, model_rtx));
}
/* Emit store exclusive. */
static void
aarch64_emit_store_exclusive (machine_mode mode, rtx bval,
- rtx rval, rtx mem, rtx model_rtx)
+ rtx mem, rtx rval, rtx model_rtx)
{
- emit_insn (gen_aarch64_store_exclusive (mode, bval, rval, mem, model_rtx));
+ if (mode == TImode)
+ emit_insn (gen_aarch64_store_exclusive_pair
+ (bval, mem, operand_subword (rval, 0, 0, TImode),
+ operand_subword (rval, 1, 0, TImode), model_rtx));
+ else
+ emit_insn (gen_aarch64_store_exclusive (mode, bval, mem, rval, model_rtx));
}
/* Mark the previous jump instruction as unlikely. */
@@ -16950,7 +16982,7 @@ aarch64_split_compare_and_swap (rtx operands[])
CBNZ scratch, .label1
.label2:
CMP rval, 0. */
- bool strong_zero_p = !is_weak && oldval == const0_rtx;
+ bool strong_zero_p = !is_weak && oldval == const0_rtx && mode != TImode;
label1 = NULL;
if (!is_weak)
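
Taken together, these aarch64.c changes let aarch64_split_compare_and_swap
build its usual retry loop out of the new pair instructions: LDAXP to read
both halves, CMP plus CCMP to compare against the expected value, STLXP to
attempt the paired store, and CBNZ to retry when exclusive access was lost.
The shape of that loop, written as ordinary C purely for illustration (this
sketch is NOT atomic, and store_pair is a hypothetical stand-in for STLXP,
which in reality can fail and force a retry):

#include <stdint.h>
#include <stdbool.h>

struct u128 { uint64_t lo, hi; };

/* Hypothetical stand-in for STLXP: returns true if the exclusive store
   succeeded.  In real code it can return false, triggering a retry.  */
static bool
store_pair (struct u128 *mem, struct u128 val)
{
  *mem = val;
  return true;
}

static bool
cas_ti_shape (struct u128 *mem, struct u128 expected, struct u128 desired)
{
  for (;;)
    {
      struct u128 old = *mem;                 /* LDAXP rval_lo, rval_hi, [mem] */
      if (old.lo != expected.lo || old.hi != expected.hi)
        return false;                         /* CMP / CCMP, branch out        */
      if (store_pair (mem, desired))          /* STLXP scratch, lo, hi, [mem]  */
        return true;
      /* CBNZ scratch: exclusive access was lost, go around again.  */
    }
}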
diff --git a/gcc/config/aarch64/atomics.md b/gcc/config/aarch64/atomics.md
index a679270..f8bdd04 100644
--- a/gcc/config/aarch64/atomics.md
+++ b/gcc/config/aarch64/atomics.md
@@ -21,11 +21,11 @@
;; Instruction patterns.
(define_expand "@atomic_compare_and_swap<mode>"
- [(match_operand:SI 0 "register_operand") ;; bool out
- (match_operand:ALLI 1 "register_operand") ;; val out
- (match_operand:ALLI 2 "aarch64_sync_memory_operand") ;; memory
- (match_operand:ALLI 3 "nonmemory_operand") ;; expected
- (match_operand:ALLI 4 "aarch64_reg_or_zero") ;; desired
+ [(match_operand:SI 0 "register_operand" "") ;; bool out
+ (match_operand:ALLI_TI 1 "register_operand" "") ;; val out
+ (match_operand:ALLI_TI 2 "aarch64_sync_memory_operand" "") ;; memory
+ (match_operand:ALLI_TI 3 "nonmemory_operand" "") ;; expected
+ (match_operand:ALLI_TI 4 "aarch64_reg_or_zero" "") ;; desired
(match_operand:SI 5 "const_int_operand") ;; is_weak
(match_operand:SI 6 "const_int_operand") ;; mod_s
(match_operand:SI 7 "const_int_operand")] ;; mod_f
@@ -88,6 +88,30 @@
}
)
+(define_insn_and_split "@aarch64_compare_and_swap<mode>"
+ [(set (reg:CC CC_REGNUM) ;; bool out
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
+ (set (match_operand:JUST_TI 0 "register_operand" "=&r") ;; val out
+ (match_operand:JUST_TI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
+ (set (match_dup 1)
+ (unspec_volatile:JUST_TI
+ [(match_operand:JUST_TI 2 "aarch64_reg_or_zero" "rZ") ;; expect
+ (match_operand:JUST_TI 3 "aarch64_reg_or_zero" "rZ") ;; desired
+ (match_operand:SI 4 "const_int_operand") ;; is_weak
+ (match_operand:SI 5 "const_int_operand") ;; mod_s
+ (match_operand:SI 6 "const_int_operand")] ;; mod_f
+ UNSPECV_ATOMIC_CMPSW))
+ (clobber (match_scratch:SI 7 "=&r"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_compare_and_swap (operands);
+ DONE;
+ }
+)
+
(define_insn "@aarch64_compare_and_swap<mode>_lse"
[(set (match_operand:SI 0 "register_operand" "+r") ;; val out
(zero_extend:SI
@@ -133,6 +157,28 @@
return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
})
+(define_insn "@aarch64_compare_and_swap<mode>_lse"
+ [(set (match_operand:JUST_TI 0 "register_operand" "+r") ;; val out
+ (match_operand:JUST_TI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
+ (set (match_dup 1)
+ (unspec_volatile:JUST_TI
+ [(match_dup 0) ;; expect
+ (match_operand:JUST_TI 2 "register_operand" "r") ;; desired
+ (match_operand:SI 3 "const_int_operand")] ;; mod_s
+ UNSPECV_ATOMIC_CMPSW))]
+ "TARGET_LSE"
+{
+ enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
+ if (is_mm_relaxed (model))
+ return "casp\t%0, %R0, %2, %R2, %1";
+ else if (is_mm_acquire (model) || is_mm_consume (model))
+ return "caspa\t%0, %R0, %2, %R2, %1";
+ else if (is_mm_release (model))
+ return "caspl\t%0, %R0, %2, %R2, %1";
+ else
+ return "caspal\t%0, %R0, %2, %R2, %1";
+})
+
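
When LSE atomics are available the LDAXP/STLXP splitter is bypassed entirely
and the operation becomes a single compare-and-swap-pair instruction, with the
memory model selecting among casp, caspa, caspl and caspal.  As a rough
illustration (output varies with options and compiler version), forcing
ARMv8.1-A on one function should let the 16-byte __sync builtin, which is
sequentially consistent, use the caspal form; the target-attribute spelling
below is an assumption about the host compiler, and a "+lse" feature modifier
ought to work as well:

#include <stdbool.h>

__attribute__ ((target ("arch=armv8.1-a")))
bool
try_update_lse (unsigned __int128 *p,
                unsigned __int128 expected,
                unsigned __int128 desired)
{
  /* Assumption: with LSE enabled this maps to the JUST_TI _lse pattern
     above and thus to a caspal of an even/odd register pair.  */
  return __sync_bool_compare_and_swap (p, expected, desired);
}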
(define_expand "atomic_exchange<mode>"
[(match_operand:ALLI 0 "register_operand")
(match_operand:ALLI 1 "aarch64_sync_memory_operand")
@@ -581,6 +627,24 @@
}
)
+(define_insn "aarch64_load_exclusive_pair"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI
+ [(match_operand:TI 2 "aarch64_sync_memory_operand" "Q")
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPECV_LX))
+ (set (match_operand:DI 1 "register_operand" "=r")
+ (unspec_volatile:DI [(match_dup 2) (match_dup 3)] UNSPECV_LX))]
+ ""
+ {
+ enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
+ return "ldxp\t%0, %1, %2";
+ else
+ return "ldaxp\t%0, %1, %2";
+ }
+)
+
(define_insn "@aarch64_store_exclusive<mode>"
[(set (match_operand:SI 0 "register_operand" "=&r")
(unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
@@ -599,6 +663,25 @@
}
)
+(define_insn "aarch64_store_exclusive_pair"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
+ (set (match_operand:TI 1 "aarch64_sync_memory_operand" "=Q")
+ (unspec_volatile:TI
+ [(match_operand:DI 2 "aarch64_reg_or_zero" "rZ")
+ (match_operand:DI 3 "aarch64_reg_or_zero" "rZ")
+ (match_operand:SI 4 "const_int_operand")]
+ UNSPECV_SX))]
+ ""
+ {
+ enum memmodel model = memmodel_from_int (INTVAL (operands[4]));
+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
+ return "stxp\t%w0, %x2, %x3, %1";
+ else
+ return "stlxp\t%w0, %x2, %x3, %1";
+ }
+)
+
(define_expand "mem_thread_fence"
[(match_operand:SI 0 "const_int_operand")]
""
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index d23f0fc..03b3ce3 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -29,6 +29,9 @@
;; Iterator for HI, SI, DI, some instructions can only work on these modes.
(define_mode_iterator GPI_I16 [(HI "AARCH64_ISA_F16") SI DI])
+;; "Iterator" for just TI -- features like @pattern only work with iterators.
+(define_mode_iterator JUST_TI [TI])
+
;; Iterator for QI and HI modes
(define_mode_iterator SHORT [QI HI])