aboutsummaryrefslogtreecommitdiff
path: root/gcc/config
diff options
context:
space:
mode:
authorAndrew MacLeod <amacleod@redhat.com>2015-05-12 20:01:47 +0000
committerAndrew Macleod <amacleod@gcc.gnu.org>2015-05-12 20:01:47 +0000
commit46b35980b831a980f762753b64c83e1ab8eac880 (patch)
tree46ecdae55c6167da9fe60411b19d39051ad5e4c1 /gcc/config
parente7a677ca1a53221276c0b382811c0351d381b35a (diff)
downloadgcc-46b35980b831a980f762753b64c83e1ab8eac880.zip
gcc-46b35980b831a980f762753b64c83e1ab8eac880.tar.gz
gcc-46b35980b831a980f762753b64c83e1ab8eac880.tar.bz2
re PR target/65697 (__atomic memory barriers not strong enough for __sync builtins)
2015-05-12 Andrew MacLeod <amacleod@redhat.com> PR target/65697 * coretypes.h (MEMMODEL_SYNC, MEMMODEL_BASE_MASK): New macros. (enum memmodel): Add SYNC_{ACQUIRE,RELEASE,SEQ_CST}. * tree.h (memmodel_from_int, memmodel_base, is_mm_relaxed, is_mm_consume, is_mm_acquire, is_mm_release, is_mm_acq_rel, is_mm_seq_cst, is_mm_sync): New accessor functions. * builtins.c (expand_builtin_sync_operation, expand_builtin_compare_and_swap): Use MEMMODEL_SYNC_SEQ_CST. (expand_builtin_sync_lock_release): Use MEMMODEL_SYNC_RELEASE. (get_memmodel, expand_builtin_atomic_compare_exchange, expand_builtin_atomic_load, expand_builtin_atomic_store, expand_builtin_atomic_clear): Use new accessor routines. (expand_builtin_sync_synchronize): Use MEMMODEL_SYNC_SEQ_CST. * optabs.c (expand_compare_and_swap_loop): Use MEMMODEL_SYNC_SEQ_CST. (maybe_emit_sync_lock_test_and_set): Use new accessors and MEMMODEL_SYNC_ACQUIRE. (expand_sync_lock_test_and_set): Use MEMMODEL_SYNC_ACQUIRE. (expand_mem_thread_fence, expand_mem_signal_fence, expand_atomic_load, expand_atomic_store): Use new accessors. * emit-rtl.c (need_atomic_barrier_p): Add additional enum cases. * tsan.c (instrument_builtin_call): Update check for memory model beyond final enum to use MEMMODEL_LAST. * c-family/c-common.c: Use new accessor for memmodel_base. * config/aarch64/aarch64.c (aarch64_expand_compare_and_swap): Use new accessors. * config/aarch64/atomics.md (atomic_load<mode>, atomic_store<mode>, aarch64_load_exclusive<mode>, aarch64_store_exclusive<mode>, mem_thread_fence, *dmb): Likewise. * config/alpha/alpha.c (alpha_split_compare_and_swap, alpha_split_compare_and_swap_12): Likewise. * config/arm/arm.c (arm_expand_compare_and_swap, arm_split_compare_and_swap, arm_split_atomic_op): Likewise. * config/arm/sync.md (atomic_load<mode>, atomic_store<mode>, atomic_loaddi): Likewise. * config/i386/i386.c (ix86_destroy_cost_data, ix86_memmodel_check): Likewise. * config/i386/sync.md (mem_thread_fence, atomic_store<mode>): Likewise. 
* config/ia64/ia64.c (ia64_expand_atomic_op): Add new memmodel cases and use new accessors. * config/ia64/sync.md (mem_thread_fence, atomic_load<mode>, atomic_store<mode>, atomic_compare_and_swap<mode>, atomic_exchange<mode>): Use new accessors. * config/mips/mips.c (mips_process_sync_loop): Likewise. * config/pa/pa.md (atomic_loaddi, atomic_storedi): Likewise. * config/rs6000/rs6000.c (rs6000_pre_atomic_barrier, rs6000_post_atomic_barrier): Add new cases. (rs6000_expand_atomic_compare_and_swap): Use new accessors. * config/rs6000/sync.md (mem_thread_fence): Add new cases. (atomic_load<mode>): Add new cases and use new accessors. (store_quadpti): Add new cases. * config/s390/s390.md (mem_thread_fence, atomic_store<mode>): Use new accessors. * config/sparc/sparc.c (sparc_emit_membar_for_model): Use new accessors. * doc/extend.texi: Update docs to indicate 16 bits are used for memory model, not 8. From-SVN: r223096
Diffstat (limited to 'gcc/config')
-rw-r--r--gcc/config/aarch64/aarch64.c4
-rw-r--r--gcc/config/aarch64/atomics.md38
-rw-r--r--gcc/config/alpha/alpha.c16
-rw-r--r--gcc/config/arm/arm.c36
-rw-r--r--gcc/config/arm/sync.md16
-rw-r--r--gcc/config/i386/i386.c8
-rw-r--r--gcc/config/i386/sync.md10
-rw-r--r--gcc/config/ia64/ia64.c9
-rw-r--r--gcc/config/ia64/sync.md18
-rw-r--r--gcc/config/mips/mips.c2
-rw-r--r--gcc/config/pa/pa.md8
-rw-r--r--gcc/config/rs6000/rs6000.c14
-rw-r--r--gcc/config/rs6000/sync.md15
-rw-r--r--gcc/config/s390/s390.md6
-rw-r--r--gcc/config/sparc/sparc.c10
15 files changed, 105 insertions, 105 deletions
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index f866c7e..7f0cc0d 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -9203,8 +9203,8 @@ aarch64_expand_compare_and_swap (rtx operands[])
unlikely event of fail being ACQUIRE and succ being RELEASE we need to
promote succ to ACQ_REL so that we don't lose the acquire semantics. */
- if (INTVAL (mod_f) == MEMMODEL_ACQUIRE
- && INTVAL (mod_s) == MEMMODEL_RELEASE)
+ if (is_mm_acquire (memmodel_from_int (INTVAL (mod_f)))
+ && is_mm_release (memmodel_from_int (INTVAL (mod_s))))
mod_s = GEN_INT (MEMMODEL_ACQ_REL);
switch (mode)
diff --git a/gcc/config/aarch64/atomics.md b/gcc/config/aarch64/atomics.md
index 939a11e..1a38ac0 100644
--- a/gcc/config/aarch64/atomics.md
+++ b/gcc/config/aarch64/atomics.md
@@ -260,10 +260,8 @@
UNSPECV_LDA))]
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_RELEASE)
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return "ldr<atomic_sfx>\t%<w>0, %1";
else
return "ldar<atomic_sfx>\t%<w>0, %1";
@@ -278,10 +276,8 @@
UNSPECV_STL))]
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_ACQUIRE)
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
return "str<atomic_sfx>\t%<w>1, %0";
else
return "stlr<atomic_sfx>\t%<w>1, %0";
@@ -297,10 +293,8 @@
UNSPECV_LX)))]
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_RELEASE)
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return "ldxr<atomic_sfx>\t%w0, %1";
else
return "ldaxr<atomic_sfx>\t%w0, %1";
@@ -315,10 +309,8 @@
UNSPECV_LX))]
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_RELEASE)
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return "ldxr\t%<w>0, %1";
else
return "ldaxr\t%<w>0, %1";
@@ -335,10 +327,8 @@
UNSPECV_SX))]
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[3]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_ACQUIRE)
+ enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
return "stxr<atomic_sfx>\t%w0, %<w>2, %1";
else
return "stlxr<atomic_sfx>\t%w0, %<w>2, %1";
@@ -349,8 +339,8 @@
[(match_operand:SI 0 "const_int_operand" "")]
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[0]);
- if (model != MEMMODEL_RELAXED && model != MEMMODEL_CONSUME)
+ enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
+ if (!(is_mm_relaxed (model) || is_mm_consume (model)))
emit_insn (gen_dmb (operands[0]));
DONE;
}
@@ -373,8 +363,8 @@
UNSPEC_MB))]
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[1]);
- if (model == MEMMODEL_ACQUIRE)
+ enum memmodel model = memmodel_from_int (INTVAL (operands[1]));
+ if (is_mm_acquire (model))
return "dmb\\tishld";
else
return "dmb\\tish";
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index 17024990..4af0dbe 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -4493,8 +4493,8 @@ alpha_split_compare_and_swap (rtx operands[])
oldval = operands[3];
newval = operands[4];
is_weak = (operands[5] != const0_rtx);
- mod_s = (enum memmodel) INTVAL (operands[6]);
- mod_f = (enum memmodel) INTVAL (operands[7]);
+ mod_s = memmodel_from_int (INTVAL (operands[6]));
+ mod_f = memmodel_from_int (INTVAL (operands[7]));
mode = GET_MODE (mem);
alpha_pre_atomic_barrier (mod_s);
@@ -4532,12 +4532,12 @@ alpha_split_compare_and_swap (rtx operands[])
emit_unlikely_jump (x, label1);
}
- if (mod_f != MEMMODEL_RELAXED)
+ if (!is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
alpha_post_atomic_barrier (mod_s);
- if (mod_f == MEMMODEL_RELAXED)
+ if (is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
}
@@ -4598,8 +4598,8 @@ alpha_split_compare_and_swap_12 (rtx operands[])
newval = operands[4];
align = operands[5];
is_weak = (operands[6] != const0_rtx);
- mod_s = (enum memmodel) INTVAL (operands[7]);
- mod_f = (enum memmodel) INTVAL (operands[8]);
+ mod_s = memmodel_from_int (INTVAL (operands[7]));
+ mod_f = memmodel_from_int (INTVAL (operands[8]));
scratch = operands[9];
mode = GET_MODE (orig_mem);
addr = XEXP (orig_mem, 0);
@@ -4651,12 +4651,12 @@ alpha_split_compare_and_swap_12 (rtx operands[])
emit_unlikely_jump (x, label1);
}
- if (mod_f != MEMMODEL_RELAXED)
+ if (!is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
alpha_post_atomic_barrier (mod_s);
- if (mod_f == MEMMODEL_RELAXED)
+ if (is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
}
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index e26f40c..19b7385 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -27461,8 +27461,8 @@ arm_expand_compare_and_swap (rtx operands[])
promote succ to ACQ_REL so that we don't lose the acquire semantics. */
if (TARGET_HAVE_LDACQ
- && INTVAL (mod_f) == MEMMODEL_ACQUIRE
- && INTVAL (mod_s) == MEMMODEL_RELEASE)
+ && is_mm_acquire (memmodel_from_int (INTVAL (mod_f)))
+ && is_mm_release (memmodel_from_int (INTVAL (mod_s))))
mod_s = GEN_INT (MEMMODEL_ACQ_REL);
switch (mode)
@@ -27535,20 +27535,18 @@ arm_split_compare_and_swap (rtx operands[])
oldval = operands[2];
newval = operands[3];
is_weak = (operands[4] != const0_rtx);
- mod_s = (enum memmodel) INTVAL (operands[5]);
- mod_f = (enum memmodel) INTVAL (operands[6]);
+ mod_s = memmodel_from_int (INTVAL (operands[5]));
+ mod_f = memmodel_from_int (INTVAL (operands[6]));
scratch = operands[7];
mode = GET_MODE (mem);
bool use_acquire = TARGET_HAVE_LDACQ
- && !(mod_s == MEMMODEL_RELAXED
- || mod_s == MEMMODEL_CONSUME
- || mod_s == MEMMODEL_RELEASE);
-
+ && !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
+ || is_mm_release (mod_s));
+
bool use_release = TARGET_HAVE_LDACQ
- && !(mod_s == MEMMODEL_RELAXED
- || mod_s == MEMMODEL_CONSUME
- || mod_s == MEMMODEL_ACQUIRE);
+ && !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
+ || is_mm_acquire (mod_s));
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
@@ -27586,14 +27584,14 @@ arm_split_compare_and_swap (rtx operands[])
emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
}
- if (mod_f != MEMMODEL_RELAXED)
+ if (!is_mm_relaxed (mod_f))
emit_label (label2);
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
arm_post_atomic_barrier (mod_s);
- if (mod_f == MEMMODEL_RELAXED)
+ if (is_mm_relaxed (mod_f))
emit_label (label2);
}
@@ -27601,21 +27599,19 @@ void
arm_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
rtx value, rtx model_rtx, rtx cond)
{
- enum memmodel model = (enum memmodel) INTVAL (model_rtx);
+ enum memmodel model = memmodel_from_int (INTVAL (model_rtx));
machine_mode mode = GET_MODE (mem);
machine_mode wmode = (mode == DImode ? DImode : SImode);
rtx_code_label *label;
rtx x;
bool use_acquire = TARGET_HAVE_LDACQ
- && !(model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_RELEASE);
+ && !(is_mm_relaxed (model) || is_mm_consume (model)
+ || is_mm_release (model));
bool use_release = TARGET_HAVE_LDACQ
- && !(model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_ACQUIRE);
+ && !(is_mm_relaxed (model) || is_mm_consume (model)
+ || is_mm_acquire (model));
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
diff --git a/gcc/config/arm/sync.md b/gcc/config/arm/sync.md
index 78bdafc..44cda61 100644
--- a/gcc/config/arm/sync.md
+++ b/gcc/config/arm/sync.md
@@ -73,10 +73,8 @@
VUNSPEC_LDA))]
"TARGET_HAVE_LDACQ"
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_RELEASE)
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return \"ldr<sync_sfx>\\t%0, %1\";
else
return \"lda<sync_sfx>\\t%0, %1\";
@@ -91,10 +89,8 @@
VUNSPEC_STL))]
"TARGET_HAVE_LDACQ"
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_ACQUIRE)
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
return \"str<sync_sfx>\t%1, %0\";
else
return \"stl<sync_sfx>\t%1, %0\";
@@ -110,10 +106,10 @@
(match_operand:SI 2 "const_int_operand")] ;; model
"TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN"
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
expand_mem_thread_fence (model);
emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1]));
- if (model == MEMMODEL_SEQ_CST)
+ if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
DONE;
})
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 1f13097..d87dd01 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -51222,7 +51222,7 @@ ix86_destroy_cost_data (void *data)
static unsigned HOST_WIDE_INT
ix86_memmodel_check (unsigned HOST_WIDE_INT val)
{
- unsigned HOST_WIDE_INT model = val & MEMMODEL_MASK;
+ enum memmodel model = memmodel_from_int (val);
bool strong;
if (val & ~(unsigned HOST_WIDE_INT)(IX86_HLE_ACQUIRE|IX86_HLE_RELEASE
@@ -51233,14 +51233,14 @@ ix86_memmodel_check (unsigned HOST_WIDE_INT val)
"Unknown architecture specific memory model");
return MEMMODEL_SEQ_CST;
}
- strong = (model == MEMMODEL_ACQ_REL || model == MEMMODEL_SEQ_CST);
- if (val & IX86_HLE_ACQUIRE && !(model == MEMMODEL_ACQUIRE || strong))
+ strong = (is_mm_acq_rel (model) || is_mm_seq_cst (model));
+ if (val & IX86_HLE_ACQUIRE && !(is_mm_acquire (model) || strong))
{
warning (OPT_Winvalid_memory_model,
"HLE_ACQUIRE not used with ACQUIRE or stronger memory model");
return MEMMODEL_SEQ_CST | IX86_HLE_ACQUIRE;
}
- if (val & IX86_HLE_RELEASE && !(model == MEMMODEL_RELEASE || strong))
+ if (val & IX86_HLE_RELEASE && !(is_mm_release (model) || strong))
{
warning (OPT_Winvalid_memory_model,
"HLE_RELEASE not used with RELEASE or stronger memory model");
diff --git a/gcc/config/i386/sync.md b/gcc/config/i386/sync.md
index 61a2a81..59573d4 100644
--- a/gcc/config/i386/sync.md
+++ b/gcc/config/i386/sync.md
@@ -105,11 +105,11 @@
[(match_operand:SI 0 "const_int_operand")] ;; model
""
{
- enum memmodel model = (enum memmodel) (INTVAL (operands[0]) & MEMMODEL_MASK);
+ enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
/* Unless this is a SEQ_CST fence, the i386 memory model is strong
enough not to require barriers of any kind. */
- if (model == MEMMODEL_SEQ_CST)
+ if (is_mm_seq_cst (model))
{
rtx (*mfence_insn)(rtx);
rtx mem;
@@ -217,7 +217,7 @@
UNSPEC_STA))]
""
{
- enum memmodel model = (enum memmodel) (INTVAL (operands[2]) & MEMMODEL_MASK);
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (<MODE>mode == DImode && !TARGET_64BIT)
{
@@ -233,7 +233,7 @@
operands[1] = force_reg (<MODE>mode, operands[1]);
/* For seq-cst stores, when we lack MFENCE, use XCHG. */
- if (model == MEMMODEL_SEQ_CST && !(TARGET_64BIT || TARGET_SSE2))
+ if (is_mm_seq_cst (model) && !(TARGET_64BIT || TARGET_SSE2))
{
emit_insn (gen_atomic_exchange<mode> (gen_reg_rtx (<MODE>mode),
operands[0], operands[1],
@@ -246,7 +246,7 @@
operands[2]));
}
/* ... followed by an MFENCE, if required. */
- if (model == MEMMODEL_SEQ_CST)
+ if (is_mm_seq_cst (model))
emit_insn (gen_mem_thread_fence (operands[2]));
DONE;
})
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 380088e..c1e2ecd 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -2386,10 +2386,12 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
{
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_memory_barrier ());
/* FALLTHRU */
case MEMMODEL_RELAXED:
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_CONSUME:
if (mode == SImode)
icode = CODE_FOR_fetchadd_acq_si;
@@ -2397,6 +2399,7 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
icode = CODE_FOR_fetchadd_acq_di;
break;
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
if (mode == SImode)
icode = CODE_FOR_fetchadd_rel_si;
else
@@ -2423,8 +2426,7 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
front half of the full barrier. The end half is the cmpxchg.rel.
For relaxed and release memory models, we don't need this. But we
also don't bother trying to prevent it either. */
- gcc_assert (model == MEMMODEL_RELAXED
- || model == MEMMODEL_RELEASE
+ gcc_assert (is_mm_relaxed (model) || is_mm_release (model)
|| MEM_VOLATILE_P (mem));
old_reg = gen_reg_rtx (DImode);
@@ -2468,6 +2470,7 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
{
case MEMMODEL_RELAXED:
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_CONSUME:
switch (mode)
{
@@ -2481,8 +2484,10 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
break;
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
switch (mode)
{
case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
diff --git a/gcc/config/ia64/sync.md b/gcc/config/ia64/sync.md
index 75d746d..9c178b8 100644
--- a/gcc/config/ia64/sync.md
+++ b/gcc/config/ia64/sync.md
@@ -33,7 +33,7 @@
[(match_operand:SI 0 "const_int_operand" "")] ;; model
""
{
- if (INTVAL (operands[0]) == MEMMODEL_SEQ_CST)
+ if (is_mm_seq_cst (memmodel_from_int (INTVAL (operands[0]))))
emit_insn (gen_memory_barrier ());
DONE;
})
@@ -60,11 +60,11 @@
(match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
/* Unless the memory model is relaxed, we want to emit ld.acq, which
will happen automatically for volatile memories. */
- gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[1]));
+ gcc_assert (is_mm_relaxed (model) || MEM_VOLATILE_P (operands[1]));
emit_move_insn (operands[0], operands[1]);
DONE;
})
@@ -75,17 +75,17 @@
(match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
/* Unless the memory model is relaxed, we want to emit st.rel, which
will happen automatically for volatile memories. */
- gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[0]));
+ gcc_assert (is_mm_relaxed (model) || MEM_VOLATILE_P (operands[0]));
emit_move_insn (operands[0], operands[1]);
/* Sequentially consistent stores need a subsequent MF. See
http://www.decadent.org.uk/pipermail/cpp-threads/2008-December/001952.html
for a discussion of why a MF is needed here, but not for atomic_load. */
- if (model == MEMMODEL_SEQ_CST)
+ if (is_mm_seq_cst (model))
emit_insn (gen_memory_barrier ());
DONE;
})
@@ -101,7 +101,8 @@
(match_operand:SI 7 "const_int_operand" "")] ;; fail model
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[6]);
+ /* No need to distinguish __sync from __atomic, so get base value. */
+ enum memmodel model = memmodel_base (INTVAL (operands[6]));
rtx ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
rtx dval, eval;
@@ -200,7 +201,8 @@
(match_operand:SI 3 "const_int_operand" "")] ;; succ model
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[3]);
+ /* No need to distinguish __sync from __atomic, so get base value. */
+ enum memmodel model = memmodel_base (INTVAL (operands[3]));
switch (model)
{
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 16ed5f0..c6e40a1 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -13106,7 +13106,7 @@ mips_process_sync_loop (rtx_insn *insn, rtx *operands)
model = MEMMODEL_ACQUIRE;
break;
default:
- model = (enum memmodel) INTVAL (operands[memmodel_attr]);
+ model = memmodel_from_int (INTVAL (operands[memmodel_attr]));
}
mips_multi_start ();
diff --git a/gcc/config/pa/pa.md b/gcc/config/pa/pa.md
index 0e9b2f6..cc077a4 100644
--- a/gcc/config/pa/pa.md
+++ b/gcc/config/pa/pa.md
@@ -707,12 +707,12 @@
(match_operand:SI 2 "const_int_operand")] ;; model
"!TARGET_64BIT && !TARGET_SOFT_FLOAT"
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
operands[1] = force_reg (SImode, XEXP (operands[1], 0));
operands[2] = gen_reg_rtx (DImode);
expand_mem_thread_fence (model);
emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1], operands[2]));
- if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
+ if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
DONE;
})
@@ -734,12 +734,12 @@
(match_operand:SI 2 "const_int_operand")] ;; model
"!TARGET_64BIT && !TARGET_SOFT_FLOAT"
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
operands[0] = force_reg (SImode, XEXP (operands[0], 0));
operands[2] = gen_reg_rtx (DImode);
expand_mem_thread_fence (model);
emit_insn (gen_atomic_storedi_1 (operands[0], operands[1], operands[2]));
- if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
+ if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
DONE;
})
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 3e6f2e3..ec7332c 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -20516,12 +20516,15 @@ rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
case MEMMODEL_RELAXED:
case MEMMODEL_CONSUME:
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
break;
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
case MEMMODEL_ACQ_REL:
emit_insn (gen_lwsync ());
break;
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_hwsync ());
break;
default:
@@ -20538,10 +20541,13 @@ rs6000_post_atomic_barrier (enum memmodel model)
case MEMMODEL_RELAXED:
case MEMMODEL_CONSUME:
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
break;
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_isync ());
break;
default:
@@ -20642,8 +20648,8 @@ rs6000_expand_atomic_compare_and_swap (rtx operands[])
oldval = operands[3];
newval = operands[4];
is_weak = (INTVAL (operands[5]) != 0);
- mod_s = (enum memmodel) INTVAL (operands[6]);
- mod_f = (enum memmodel) INTVAL (operands[7]);
+ mod_s = memmodel_from_int (INTVAL (operands[6]));
+ mod_f = memmodel_from_int (INTVAL (operands[7]));
orig_mode = mode = GET_MODE (mem);
mask = shift = NULL_RTX;
@@ -20731,12 +20737,12 @@ rs6000_expand_atomic_compare_and_swap (rtx operands[])
emit_unlikely_jump (x, label1);
}
- if (mod_f != MEMMODEL_RELAXED)
+ if (!is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
rs6000_post_atomic_barrier (mod_s);
- if (mod_f == MEMMODEL_RELAXED)
+ if (is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
if (shift)
diff --git a/gcc/config/rs6000/sync.md b/gcc/config/rs6000/sync.md
index 4364c85..8ba30b9 100644
--- a/gcc/config/rs6000/sync.md
+++ b/gcc/config/rs6000/sync.md
@@ -41,18 +41,21 @@
[(match_operand:SI 0 "const_int_operand" "")] ;; model
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[0]);
+ enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
switch (model)
{
case MEMMODEL_RELAXED:
break;
case MEMMODEL_CONSUME:
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
case MEMMODEL_ACQ_REL:
emit_insn (gen_lwsync ());
break;
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_hwsync ());
break;
default:
@@ -144,9 +147,9 @@
if (<MODE>mode == TImode && !TARGET_SYNC_TI)
FAIL;
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
- if (model == MEMMODEL_SEQ_CST)
+ if (is_mm_seq_cst (model))
emit_insn (gen_hwsync ());
if (<MODE>mode != TImode)
@@ -182,7 +185,9 @@
break;
case MEMMODEL_CONSUME:
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_loadsync_<mode> (operands[0]));
break;
default:
@@ -209,15 +214,17 @@
if (<MODE>mode == TImode && !TARGET_SYNC_TI)
FAIL;
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
switch (model)
{
case MEMMODEL_RELAXED:
break;
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
emit_insn (gen_lwsync ());
break;
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_hwsync ());
break;
default:
diff --git a/gcc/config/s390/s390.md b/gcc/config/s390/s390.md
index a377a1f..ad06721 100644
--- a/gcc/config/s390/s390.md
+++ b/gcc/config/s390/s390.md
@@ -9225,7 +9225,7 @@
{
/* Unless this is a SEQ_CST fence, the s390 memory model is strong
enough not to require barriers of any kind. */
- if (INTVAL (operands[0]) == MEMMODEL_SEQ_CST)
+ if (is_mm_seq_cst (memmodel_from_int (INTVAL (operands[0]))))
{
rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
MEM_VOLATILE_P (mem) = 1;
@@ -9306,7 +9306,7 @@
(match_operand:SI 2 "const_int_operand")] ;; model
""
{
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (MEM_ALIGN (operands[0]) < GET_MODE_BITSIZE (GET_MODE (operands[0])))
FAIL;
@@ -9317,7 +9317,7 @@
emit_insn (gen_atomic_storedi_1 (operands[0], operands[1]));
else
emit_move_insn (operands[0], operands[1]);
- if (model == MEMMODEL_SEQ_CST)
+ if (is_mm_seq_cst (model))
emit_insn (gen_mem_thread_fence (operands[2]));
DONE;
})
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index aab6f3d..a1562ad 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -11631,9 +11631,8 @@ sparc_emit_membar_for_model (enum memmodel model,
if (before_after & 1)
{
- if (model == MEMMODEL_RELEASE
- || model == MEMMODEL_ACQ_REL
- || model == MEMMODEL_SEQ_CST)
+ if (is_mm_release (model) || is_mm_acq_rel (model)
+ || is_mm_seq_cst (model))
{
if (load_store & 1)
mm |= LoadLoad | StoreLoad;
@@ -11643,9 +11642,8 @@ sparc_emit_membar_for_model (enum memmodel model,
}
if (before_after & 2)
{
- if (model == MEMMODEL_ACQUIRE
- || model == MEMMODEL_ACQ_REL
- || model == MEMMODEL_SEQ_CST)
+ if (is_mm_acquire (model) || is_mm_acq_rel (model)
+ || is_mm_seq_cst (model))
{
if (load_store & 1)
mm |= LoadLoad | LoadStore;