path: root/gcc/config/pa
author    John David Anglin <danglin@gcc.gnu.org>  2015-09-25 00:01:34 +0000
committer John David Anglin <danglin@gcc.gnu.org>  2015-09-25 00:01:34 +0000
commit  7e7c9d40d17e0efb27691fe9dfc4c831d03c7ff2 (patch)
tree    2b7872002777ddf16e2fc54b210c920b56922e35 /gcc/config/pa
parent  4fe017f67af0b6ac09c568626227460d7a1209c1 (diff)
pa-linux.h (HAVE_sync_compare_and_swapdi): Define.
* config/pa/pa-linux.h (HAVE_sync_compare_and_swapdi): Define.
* config/pa/pa-protos.h (pa_maybe_emit_compare_and_swap_exchange_loop):
Declare.
* config/pa/pa.c (pa_init_libfuncs): Init sync libfuncs up to 8 bytes.
(pa_expand_compare_and_swap_loop): New.
(pa_maybe_emit_compare_and_swap_exchange_loop): New.
* config/pa/pa.md (atomic_storeqi, atomic_storehi, atomic_storesi,
atomic_storesf, atomic_loaddf, atomic_storedf): New expanders.
(atomic_loaddf_1, atomic_storedf_1): New insn patterns.
(atomic_loaddi, atomic_loaddi_1, atomic_storedi, atomic_storedi_1):
Revise.

From-SVN: r228104
Diffstat (limited to 'gcc/config/pa')
-rw-r--r--  gcc/config/pa/pa-linux.h  |   1
-rw-r--r--  gcc/config/pa/pa-protos.h |   1
-rw-r--r--  gcc/config/pa/pa.c        |  77
-rw-r--r--  gcc/config/pa/pa.md       | 214
4 files changed, 270 insertions, 23 deletions
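
For orientation (illustrative, not part of the commit): the new expanders exist so that user-level atomic stores, including 8-byte ones like the call below, are routed through a compare-and-swap exchange loop when TARGET_SYNC_LIBCALL is in effect, rather than a plain store that could race with the kernel cmpxchg helper. The function name is hypothetical.

    #include <stdint.h>

    /* Hypothetical user code on 32-bit hppa-linux: an 8-byte atomic store.
       With this patch, when TARGET_SYNC_LIBCALL is in effect the store is
       expanded as a compare-and-swap exchange loop instead of a plain
       doubleword store, so it cannot race with the kernel cmpxchg helper.  */
    void
    store_counter (uint64_t *p, uint64_t v)
    {
      __atomic_store_n (p, v, __ATOMIC_SEQ_CST);
    }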
diff --git a/gcc/config/pa/pa-linux.h b/gcc/config/pa/pa-linux.h
index f8da185..957a274 100644
--- a/gcc/config/pa/pa-linux.h
+++ b/gcc/config/pa/pa-linux.h
@@ -140,3 +140,4 @@ along with GCC; see the file COPYING3. If not see
#define HAVE_sync_compare_and_swapqi 1
#define HAVE_sync_compare_and_swaphi 1
#define HAVE_sync_compare_and_swapsi 1
+#define HAVE_sync_compare_and_swapdi 1
diff --git a/gcc/config/pa/pa-protos.h b/gcc/config/pa/pa-protos.h
index 8bf2453..82ca9b2 100644
--- a/gcc/config/pa/pa-protos.h
+++ b/gcc/config/pa/pa-protos.h
@@ -79,6 +79,7 @@ extern enum direction pa_function_arg_padding (machine_mode, const_tree);
#endif /* ARGS_SIZE_RTX */
extern int pa_insn_refs_are_delayed (rtx_insn *);
extern rtx pa_get_deferred_plabel (rtx);
+extern rtx pa_maybe_emit_compare_and_swap_exchange_loop (rtx, rtx, rtx);
#endif /* RTX_CODE */
extern int pa_and_mask_p (unsigned HOST_WIDE_INT);
diff --git a/gcc/config/pa/pa.c b/gcc/config/pa/pa.c
index 44ad885..38daa5f 100644
--- a/gcc/config/pa/pa.c
+++ b/gcc/config/pa/pa.c
@@ -5749,7 +5749,7 @@ pa_init_libfuncs (void)
}
if (TARGET_SYNC_LIBCALL)
- init_sync_libfuncs (UNITS_PER_WORD);
+ init_sync_libfuncs (8);
}
/* HP's millicode routines mean something special to the assembler.
@@ -10555,4 +10555,79 @@ pa_output_addr_diff_vec (rtx lab, rtx body)
fputs ("\t.end_brtab\n", asm_out_file);
}
+/* This is a helper function for the other atomic operations. This function
+ emits a loop that contains SEQ that iterates until a compare-and-swap
+ operation at the end succeeds. MEM is the memory to be modified. SEQ is
+ a set of instructions that takes a value from OLD_REG as an input and
+ produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
+ set to the current contents of MEM. After SEQ, a compare-and-swap will
+ attempt to update MEM with NEW_REG. The function returns true when the
+ loop was generated successfully. */
+
+static bool
+pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
+{
+ machine_mode mode = GET_MODE (mem);
+ rtx_code_label *label;
+ rtx cmp_reg, success, oldval;
+
+ /* The loop we want to generate looks like
+
+ cmp_reg = mem;
+ label:
+ old_reg = cmp_reg;
+ seq;
+ (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
+ if (success)
+ goto label;
+
+ Note that we only do the plain load from memory once. Subsequent
+ iterations use the value loaded by the compare-and-swap pattern. */
+
+ label = gen_label_rtx ();
+ cmp_reg = gen_reg_rtx (mode);
+
+ emit_move_insn (cmp_reg, mem);
+ emit_label (label);
+ emit_move_insn (old_reg, cmp_reg);
+ if (seq)
+ emit_insn (seq);
+
+ success = NULL_RTX;
+ oldval = cmp_reg;
+ if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
+ new_reg, false, MEMMODEL_SYNC_SEQ_CST,
+ MEMMODEL_RELAXED))
+ return false;
+
+ if (oldval != cmp_reg)
+ emit_move_insn (cmp_reg, oldval);
+
+ /* Mark this jump predicted not taken. */
+ emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
+ GET_MODE (success), 1, label, 0);
+ return true;
+}
+
+/* This function tries to implement an atomic exchange operation using a
+ compare_and_swap loop. VAL is written to *MEM. The previous contents of
+ *MEM are returned, using TARGET if possible. No memory model is required
+ since a compare_and_swap loop is seq-cst. */
+
+rtx
+pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
+{
+ machine_mode mode = GET_MODE (mem);
+
+ if (can_compare_and_swap_p (mode, true))
+ {
+ if (!target || !register_operand (target, mode))
+ target = gen_reg_rtx (mode);
+ if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
+ return target;
+ }
+
+ return NULL_RTX;
+}
+
#include "gt-pa.h"
diff --git a/gcc/config/pa/pa.md b/gcc/config/pa/pa.md
index 8ea669a..36efb84 100644
--- a/gcc/config/pa/pa.md
+++ b/gcc/config/pa/pa.md
@@ -699,59 +699,229 @@
;; doubleword loads and stores are not guaranteed to be atomic
;; when referencing the I/O address space.
-;; Implement atomic DImode load using 64-bit floating point load and copy.
+;; The kernel cmpxchg operation on linux is not atomic with respect to
+;; memory stores on SMP machines, so we must do stores using a cmpxchg
+;; operation.
+
+;; Implement atomic QImode store using exchange.
+
+(define_expand "atomic_storeqi"
+ [(match_operand:QI 0 "memory_operand") ;; memory
+ (match_operand:QI 1 "register_operand") ;; val out
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ ""
+{
+ if (TARGET_SYNC_LIBCALL)
+ {
+ rtx mem = operands[0];
+ rtx val = operands[1];
+ if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+ DONE;
+ }
+ FAIL;
+})
+
+;; Implement atomic HImode stores using exchange.
+
+(define_expand "atomic_storehi"
+ [(match_operand:HI 0 "memory_operand") ;; memory
+ (match_operand:HI 1 "register_operand") ;; val out
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ ""
+{
+ if (TARGET_SYNC_LIBCALL)
+ {
+ rtx mem = operands[0];
+ rtx val = operands[1];
+ if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+ DONE;
+ }
+ FAIL;
+})
+
+;; Implement atomic SImode store using exchange.
+
+(define_expand "atomic_storesi"
+ [(match_operand:SI 0 "memory_operand") ;; memory
+ (match_operand:SI 1 "register_operand") ;; val out
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ ""
+{
+ if (TARGET_SYNC_LIBCALL)
+ {
+ rtx mem = operands[0];
+ rtx val = operands[1];
+ if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+ DONE;
+ }
+ FAIL;
+})
+
+;; Implement atomic SFmode store using exchange.
+
+(define_expand "atomic_storesf"
+ [(match_operand:SF 0 "memory_operand") ;; memory
+ (match_operand:SF 1 "register_operand") ;; val out
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ ""
+{
+ if (TARGET_SYNC_LIBCALL)
+ {
+ rtx mem = operands[0];
+ rtx val = operands[1];
+ if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+ DONE;
+ }
+ FAIL;
+})
+
+;; Implement atomic DImode load using 64-bit floating point load.
(define_expand "atomic_loaddi"
[(match_operand:DI 0 "register_operand") ;; val out
(match_operand:DI 1 "memory_operand") ;; memory
(match_operand:SI 2 "const_int_operand")] ;; model
- "!TARGET_64BIT && !TARGET_SOFT_FLOAT"
+ ""
{
- enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
+ enum memmodel model;
+
+ if (TARGET_64BIT || TARGET_SOFT_FLOAT)
+ FAIL;
+
+ model = memmodel_from_int (INTVAL (operands[2]));
operands[1] = force_reg (SImode, XEXP (operands[1], 0));
- operands[2] = gen_reg_rtx (DImode);
expand_mem_thread_fence (model);
- emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1], operands[2]));
+ emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1]));
if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
DONE;
})
(define_insn "atomic_loaddi_1"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (mem:DI (match_operand:SI 1 "register_operand" "r")))
- (clobber (match_operand:DI 2 "register_operand" "=&f"))]
+ [(set (match_operand:DI 0 "register_operand" "=f,r")
+ (mem:DI (match_operand:SI 1 "register_operand" "r,r")))
+ (clobber (match_scratch:DI 2 "=X,f"))]
"!TARGET_64BIT && !TARGET_SOFT_FLOAT"
- "{fldds|fldd} 0(%1),%2\;{fstds|fstd} %2,-16(%%sp)\;{ldws|ldw} -16(%%sp),%0\;{ldws|ldw} -12(%%sp),%R0"
- [(set_attr "type" "move")
- (set_attr "length" "16")])
+ "@
+ {fldds|fldd} 0(%1),%0
+ {fldds|fldd} 0(%1),%2\n\t{fstds|fstd} %2,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0\n\t{ldws|ldw} -12(%%sp),%R0"
+ [(set_attr "type" "move,move")
+ (set_attr "length" "4,16")])
-;; Implement atomic DImode store using copy and 64-bit floating point store.
+;; Implement atomic DImode store.
(define_expand "atomic_storedi"
[(match_operand:DI 0 "memory_operand") ;; memory
(match_operand:DI 1 "register_operand") ;; val out
(match_operand:SI 2 "const_int_operand")] ;; model
- "!TARGET_64BIT && !TARGET_SOFT_FLOAT"
+ ""
{
- enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
+ enum memmodel model;
+
+ if (TARGET_SYNC_LIBCALL)
+ {
+ rtx mem = operands[0];
+ rtx val = operands[1];
+ if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+ DONE;
+ }
+
+ if (TARGET_64BIT || TARGET_SOFT_FLOAT)
+ FAIL;
+
+ model = memmodel_from_int (INTVAL (operands[2]));
operands[0] = force_reg (SImode, XEXP (operands[0], 0));
- operands[2] = gen_reg_rtx (DImode);
expand_mem_thread_fence (model);
- emit_insn (gen_atomic_storedi_1 (operands[0], operands[1], operands[2]));
+ emit_insn (gen_atomic_storedi_1 (operands[0], operands[1]));
if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
DONE;
})
(define_insn "atomic_storedi_1"
- [(set (mem:DI (match_operand:SI 0 "register_operand" "r"))
- (match_operand:DI 1 "register_operand" "r"))
- (clobber (match_operand:DI 2 "register_operand" "=&f"))]
+ [(set (mem:DI (match_operand:SI 0 "register_operand" "r,r"))
+ (match_operand:DI 1 "register_operand" "f,r"))
+ (clobber (match_scratch:DI 2 "=X,f"))]
+ "!TARGET_64BIT && !TARGET_SOFT_FLOAT && !TARGET_SYNC_LIBCALL"
+ "@
+ {fstds|fstd} %1,0(%0)
+ {stws|stw} %1,-16(%%sp)\n\t{stws|stw} %R1,-12(%%sp)\n\t{fldds|fldd} -16(%%sp),%2\n\t{fstds|fstd} %2,0(%0)"
+ [(set_attr "type" "move,move")
+ (set_attr "length" "4,16")])
+
+;; Implement atomic DFmode load using 64-bit floating point load.
+
+(define_expand "atomic_loaddf"
+ [(match_operand:DF 0 "register_operand") ;; val out
+ (match_operand:DF 1 "memory_operand") ;; memory
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ ""
+{
+ enum memmodel model;
+
+ if (TARGET_64BIT || TARGET_SOFT_FLOAT)
+ FAIL;
+
+ model = memmodel_from_int (INTVAL (operands[2]));
+ operands[1] = force_reg (SImode, XEXP (operands[1], 0));
+ expand_mem_thread_fence (model);
+ emit_insn (gen_atomic_loaddf_1 (operands[0], operands[1]));
+ if (is_mm_seq_cst (model))
+ expand_mem_thread_fence (model);
+ DONE;
+})
+
+(define_insn "atomic_loaddf_1"
+ [(set (match_operand:DF 0 "register_operand" "=f,r")
+ (mem:DF (match_operand:SI 1 "register_operand" "r,r")))
+ (clobber (match_scratch:DF 2 "=X,f"))]
"!TARGET_64BIT && !TARGET_SOFT_FLOAT"
- "{stws|stw} %1,-16(%%sp)\;{stws|stw} %R1,-12(%%sp)\;{fldds|fldd} -16(%%sp),%2\;{fstds|fstd} %2,0(%0)"
- [(set_attr "type" "move")
- (set_attr "length" "16")])
+ "@
+ {fldds|fldd} 0(%1),%0
+ {fldds|fldd} 0(%1),%2\n\t{fstds|fstd} %2,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0\n\t{ldws|ldw} -12(%%sp),%R0"
+ [(set_attr "type" "move,move")
+ (set_attr "length" "4,16")])
+
+;; Implement atomic DFmode store using 64-bit floating point store.
+
+(define_expand "atomic_storedf"
+ [(match_operand:DF 0 "memory_operand") ;; memory
+ (match_operand:DF 1 "register_operand") ;; val out
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ ""
+{
+ enum memmodel model;
+
+ if (TARGET_SYNC_LIBCALL)
+ {
+ rtx mem = operands[0];
+ rtx val = operands[1];
+ if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+ DONE;
+ }
+
+ if (TARGET_64BIT || TARGET_SOFT_FLOAT)
+ FAIL;
+
+ model = memmodel_from_int (INTVAL (operands[2]));
+ operands[0] = force_reg (SImode, XEXP (operands[0], 0));
+ expand_mem_thread_fence (model);
+ emit_insn (gen_atomic_storedf_1 (operands[0], operands[1]));
+ if (is_mm_seq_cst (model))
+ expand_mem_thread_fence (model);
+ DONE;
+})
+
+(define_insn "atomic_storedf_1"
+ [(set (mem:DF (match_operand:SI 0 "register_operand" "r,r"))
+ (match_operand:DF 1 "register_operand" "f,r"))
+ (clobber (match_scratch:DF 2 "=X,f"))]
+ "!TARGET_64BIT && !TARGET_SOFT_FLOAT"
+ "@
+ {fstds|fstd} %1,0(%0)
+ {stws|stw} %1,-16(%%sp)\n\t{stws|stw} %R1,-12(%%sp)\n\t{fldds|fldd} -16(%%sp),%2\n\t{fstds|fstd} %2,0(%0)"
+ [(set_attr "type" "move,move")
+ (set_attr "length" "4,16")])
;; Compare instructions.
;; This controls RTL generation and register allocation.
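
Finally, the DImode/DFmode load and store expanders above bracket the hardware-atomic floating-point access with fences derived from the requested memory model, emitting a trailing fence only for seq_cst. A rough C analogue of that ordering discipline (illustrative only; the plain access stands in for the fldd/fstd instruction, and the function name is made up):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Illustrative shape of atomic_loaddi on 32-bit PA when the FP path is
       used: a leading fence, one hardware-atomic doubleword access, and a
       trailing fence only for seq_cst, matching expand_mem_thread_fence and
       is_mm_seq_cst in the expander.  */
    static uint64_t
    loaddi_shape (const uint64_t *mem, memory_order model)
    {
      atomic_thread_fence (model);      /* fence derived from the model */
      uint64_t v = *mem;                /* stands in for the fldd access */
      if (model == memory_order_seq_cst)
        atomic_thread_fence (model);    /* trailing fence for seq_cst only */
      return v;
    }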