path: root/accel/tcg/atomic_common.c.inc

/*
 * Common Atomic Helper Functions
 *
 * This file should be included before the various instantiations of
 * the atomic_template.h helpers.
 *
 * Copyright (c) 2019 Linaro
 * Written by Alex Bennée <alex.bennee@linaro.org>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
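
/*
 * Illustrative sketch of the intended include order (assumed from the
 * comment above; the exact DATA_SIZE list is up to the including file,
 * e.g. accel/tcg/cputlb.c or accel/tcg/user-exec.c):
 *
 *     #include "atomic_common.c.inc"
 *
 *     #define DATA_SIZE 1
 *     #include "atomic_template.h"
 *
 *     #define DATA_SIZE 2
 *     #include "atomic_template.h"
 *     ...and so on for the remaining sizes.
 */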

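/*
 * Called from the atomic_template.h helpers after a read-modify-write
 * operation completes, so that TCG plugins observe the access through
 * the usual memory-callback path (as both a read and a write, hence
 * QEMU_PLUGIN_MEM_RW).
 */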
static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
                                  MemOpIdx oi)
{
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
}

/*
 * Atomic helpers callable from TCG.
 * These have a common interface and all defer to cpu_atomic_*
 * using the host return address from GETPC().
 */

#define CMPXCHG_HELPER(OP, TYPE) \
    TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr,      \
                             TYPE oldv, TYPE newv, uint32_t oi)     \
    { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }

CMPXCHG_HELPER(cmpxchgb, uint32_t)
CMPXCHG_HELPER(cmpxchgw_be, uint32_t)
CMPXCHG_HELPER(cmpxchgw_le, uint32_t)
CMPXCHG_HELPER(cmpxchgl_be, uint32_t)
CMPXCHG_HELPER(cmpxchgl_le, uint32_t)

#ifdef CONFIG_ATOMIC64
CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
#endif

#if HAVE_CMPXCHG128
CMPXCHG_HELPER(cmpxchgo_be, Int128)
CMPXCHG_HELPER(cmpxchgo_le, Int128)
#endif

#undef CMPXCHG_HELPER
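
/*
 * For illustration, CMPXCHG_HELPER(cmpxchgl_le, uint32_t) above expands
 * to (modulo the exact name decoration applied by HELPER()):
 *
 *     uint32_t helper_atomic_cmpxchgl_le(CPUArchState *env, uint64_t addr,
 *                                        uint32_t oldv, uint32_t newv,
 *                                        uint32_t oi)
 *     {
 *         return cpu_atomic_cmpxchgl_le_mmu(env, addr, oldv, newv,
 *                                           oi, GETPC());
 *     }
 */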

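/*
 * Non-atomic fallback for the 128-bit cmpxchg: a plain load, compare
 * and store, usable when the guest operation is not required to be
 * atomic.  Only 32-bit TCG hosts are expected to call this helper;
 * elsewhere the translator can expand the sequence inline, so the
 * helper is unreachable (hence the assertion below).
 */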
Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr,
                                  Int128 cmpv, Int128 newv, uint32_t oi)
{
#if TCG_TARGET_REG_BITS == 32
    uintptr_t ra = GETPC();
    Int128 oldv;

    oldv = cpu_ld16_mmu(env, addr, oi, ra);
    if (int128_eq(oldv, cmpv)) {
        cpu_st16_mmu(env, addr, newv, oi, ra);
    } else {
        /* Even with comparison failure, still need a write cycle. */
        probe_write(env, addr, 16, get_mmuidx(oi), ra);
    }
    return oldv;
#else
    g_assert_not_reached();
#endif
}

#define ATOMIC_HELPER(OP, TYPE) \
    TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr,  \
                                  TYPE val, uint32_t oi)                 \
    { return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(OP)              \
    ATOMIC_HELPER(glue(OP,b), uint32_t)     \
    ATOMIC_HELPER(glue(OP,w_be), uint32_t)  \
    ATOMIC_HELPER(glue(OP,w_le), uint32_t)  \
    ATOMIC_HELPER(glue(OP,l_be), uint32_t)  \
    ATOMIC_HELPER(glue(OP,l_le), uint32_t)  \
    ATOMIC_HELPER(glue(OP,q_be), uint64_t)  \
    ATOMIC_HELPER(glue(OP,q_le), uint64_t)
#else
#define GEN_ATOMIC_HELPERS(OP)              \
    ATOMIC_HELPER(glue(OP,b), uint32_t)     \
    ATOMIC_HELPER(glue(OP,w_be), uint32_t)  \
    ATOMIC_HELPER(glue(OP,w_le), uint32_t)  \
    ATOMIC_HELPER(glue(OP,l_be), uint32_t)  \
    ATOMIC_HELPER(glue(OP,l_le), uint32_t)
#endif
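
/*
 * Each GEN_ATOMIC_HELPERS(OP) below stamps out one helper per access
 * size and endianness.  For example, GEN_ATOMIC_HELPERS(fetch_add)
 * yields (modulo HELPER() name decoration):
 *
 *     helper_atomic_fetch_addb
 *     helper_atomic_fetch_addw_be, helper_atomic_fetch_addw_le
 *     helper_atomic_fetch_addl_be, helper_atomic_fetch_addl_le
 *     helper_atomic_fetch_addq_be, helper_atomic_fetch_addq_le
 *         (the q variants only with CONFIG_ATOMIC64)
 */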

GEN_ATOMIC_HELPERS(fetch_add)
GEN_ATOMIC_HELPERS(fetch_and)
GEN_ATOMIC_HELPERS(fetch_or)
GEN_ATOMIC_HELPERS(fetch_xor)
GEN_ATOMIC_HELPERS(fetch_smin)
GEN_ATOMIC_HELPERS(fetch_umin)
GEN_ATOMIC_HELPERS(fetch_smax)
GEN_ATOMIC_HELPERS(fetch_umax)

GEN_ATOMIC_HELPERS(add_fetch)
GEN_ATOMIC_HELPERS(and_fetch)
GEN_ATOMIC_HELPERS(or_fetch)
GEN_ATOMIC_HELPERS(xor_fetch)
GEN_ATOMIC_HELPERS(smin_fetch)
GEN_ATOMIC_HELPERS(umin_fetch)
GEN_ATOMIC_HELPERS(smax_fetch)
GEN_ATOMIC_HELPERS(umax_fetch)

GEN_ATOMIC_HELPERS(xchg)

#undef ATOMIC_HELPER
#undef GEN_ATOMIC_HELPERS