Diffstat (limited to 'target/arm/translate-a64.c')
-rw-r--r--  target/arm/translate-a64.c | 60 +++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 35 insertions(+), 25 deletions(-)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index bbfadb7..951b64c 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -2601,32 +2601,42 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                        get_mem_index(s),
                                        MO_64 | MO_ALIGN | s->be_data);
             tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
-        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
-            if (!HAVE_CMPXCHG128) {
-                gen_helper_exit_atomic(cpu_env);
-                /*
-                 * Produce a result so we have a well-formed opcode
-                 * stream when the following (dead) code uses 'tmp'.
-                 * TCG will remove the dead ops for us.
-                 */
-                tcg_gen_movi_i64(tmp, 0);
-            } else if (s->be_data == MO_LE) {
-                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
-                                                        cpu_exclusive_addr,
-                                                        cpu_reg(s, rt),
-                                                        cpu_reg(s, rt2));
-            } else {
-                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
-                                                        cpu_exclusive_addr,
-                                                        cpu_reg(s, rt),
-                                                        cpu_reg(s, rt2));
-            }
-        } else if (s->be_data == MO_LE) {
-            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
-                                           cpu_reg(s, rt), cpu_reg(s, rt2));
         } else {
-            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
-                                           cpu_reg(s, rt), cpu_reg(s, rt2));
+            TCGv_i128 t16 = tcg_temp_new_i128();
+            TCGv_i128 c16 = tcg_temp_new_i128();
+            TCGv_i64 a, b;
+
+            if (s->be_data == MO_LE) {
+                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt), cpu_reg(s, rt2));
+                tcg_gen_concat_i64_i128(c16, cpu_exclusive_val,
+                                        cpu_exclusive_high);
+            } else {
+                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt2), cpu_reg(s, rt));
+                tcg_gen_concat_i64_i128(c16, cpu_exclusive_high,
+                                        cpu_exclusive_val);
+            }
+
+            tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16,
+                                        get_mem_index(s),
+                                        MO_128 | MO_ALIGN | s->be_data);
+            tcg_temp_free_i128(c16);
+
+            a = tcg_temp_new_i64();
+            b = tcg_temp_new_i64();
+            if (s->be_data == MO_LE) {
+                tcg_gen_extr_i128_i64(a, b, t16);
+            } else {
+                tcg_gen_extr_i128_i64(b, a, t16);
+            }
+
+            tcg_gen_xor_i64(a, a, cpu_exclusive_val);
+            tcg_gen_xor_i64(b, b, cpu_exclusive_high);
+            tcg_gen_or_i64(tmp, a, b);
+            tcg_temp_free_i64(a);
+            tcg_temp_free_i64(b);
+            tcg_temp_free_i128(t16);
+
+            tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0);
         }
     } else {
         tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
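
For reference: the new sequence packs the {Rt, Rt2} pair and the {exclusive_val, exclusive_high} pair into 128-bit values, performs a single 16-byte atomic compare-and-swap, and derives the store-exclusive status by xor-ing the returned old value against the expected value and testing for non-zero. The following is a minimal, illustrative C sketch of that logic only; it is not QEMU code, the function and variable names are invented, and it assumes GCC/Clang's __uint128_t and the __atomic_compare_exchange_n builtin (which may be routed through libatomic on some hosts).

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper mirroring the little-endian (MO_LE) path above. */
static uint64_t stxp_status(__uint128_t *mem, uint64_t rt, uint64_t rt2,
                            uint64_t excl_val, uint64_t excl_high)
{
    /* tcg_gen_concat_i64_i128: low doubleword first. */
    __uint128_t newv = ((__uint128_t)rt2 << 64) | rt;
    __uint128_t cmpv = ((__uint128_t)excl_high << 64) | excl_val;
    __uint128_t oldv = cmpv;

    /*
     * One 128-bit compare-and-swap replaces the old paired_cmpxchg64
     * helpers; oldv ends up holding the value that was in memory.
     */
    __atomic_compare_exchange_n(mem, &oldv, newv, false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

    /* xor/xor/or/setcond: 0 on success, 1 on failure (the STXP status). */
    uint64_t a = (uint64_t)oldv ^ excl_val;
    uint64_t b = (uint64_t)(oldv >> 64) ^ excl_high;
    return (a | b) != 0;
}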