Diffstat (limited to 'include/accel')
-rw-r--r--  include/accel/accel-cpu-target.h     |  31
-rw-r--r--  include/accel/accel-cpu.h            |  23
-rw-r--r--  include/accel/tcg/cpu-ldst-common.h  | 122
-rw-r--r--  include/accel/tcg/cpu-ldst.h         | 505
-rw-r--r--  include/accel/tcg/cpu-mmu-index.h    |  42
-rw-r--r--  include/accel/tcg/cpu-ops.h          | 333
-rw-r--r--  include/accel/tcg/getpc.h            |  20
-rw-r--r--  include/accel/tcg/helper-retaddr.h   |  43
-rw-r--r--  include/accel/tcg/iommu.h            |  41
-rw-r--r--  include/accel/tcg/probe.h            | 122
-rw-r--r--  include/accel/tcg/tb-cpu-state.h     |  18
11 files changed, 1300 insertions, 0 deletions
diff --git a/include/accel/accel-cpu-target.h b/include/accel/accel-cpu-target.h
new file mode 100644
index 0000000..6feb344
--- /dev/null
+++ b/include/accel/accel-cpu-target.h
@@ -0,0 +1,31 @@
+/*
+ * Accelerator interface, specializes CPUClass
+ * This header is used only by target-specific code.
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef ACCEL_CPU_TARGET_H
+#define ACCEL_CPU_TARGET_H
+
+/*
+ * This header is used to define new target- and accelerator-specific
+ * AccelCPUClass subclasses.
+ * It uses CPU_RESOLVING_TYPE, so it is clearly target-specific.
+ *
+ * Do not use it for any purpose other than the implementation of new
+ * subclasses in target/, or the accel implementation itself in accel/.
+ */
+
+#include "qom/object.h"
+#include "accel/accel-cpu.h"
+#include "cpu.h"
+
+#define TYPE_ACCEL_CPU "accel-" CPU_RESOLVING_TYPE
+#define ACCEL_CPU_NAME(name) (name "-" TYPE_ACCEL_CPU)
+DECLARE_CLASS_CHECKERS(AccelCPUClass, ACCEL_CPU, TYPE_ACCEL_CPU)
+
+#endif /* ACCEL_CPU_TARGET_H */
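
For illustration, a sketch (not part of this diff) of how target code can register an accelerator-specific CPU subclass with these macros. The "tcg" accelerator name and the hook assignment are assumptions:

    /* Sketch: register a per-accelerator CPU subclass. With a resolving
     * type of e.g. "x86_64-cpu", ACCEL_CPU_NAME("tcg") expands to
     * "tcg-accel-x86_64-cpu". Registered with type_init() as usual. */
    static void example_accel_cpu_class_init(ObjectClass *oc, void *data)
    {
        AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

        acc->cpu_instance_init = example_cpu_instance_init; /* hypothetical */
    }

    static const TypeInfo example_accel_cpu_type_info = {
        .name = ACCEL_CPU_NAME("tcg"),
        .parent = TYPE_ACCEL_CPU,
        .class_init = example_accel_cpu_class_init,
        .abstract = true,
    };
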
diff --git a/include/accel/accel-cpu.h b/include/accel/accel-cpu.h
new file mode 100644
index 0000000..9e7eede
--- /dev/null
+++ b/include/accel/accel-cpu.h
@@ -0,0 +1,23 @@
+/*
+ * Accelerator interface, specializes CPUClass
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ACCEL_CPU_H
+#define ACCEL_CPU_H
+
+#include "qom/object.h"
+#include "hw/core/cpu.h"
+
+typedef struct AccelCPUClass {
+ ObjectClass parent_class;
+
+ void (*cpu_class_init)(CPUClass *cc);
+ void (*cpu_instance_init)(CPUState *cpu);
+ bool (*cpu_target_realize)(CPUState *cpu, Error **errp);
+} AccelCPUClass;
+
+#endif /* ACCEL_CPU_H */
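
The three hooks specialize a CPU for a given accelerator at different points of the QOM lifecycle: cpu_class_init once per CPUClass, cpu_instance_init at object-init time, cpu_target_realize at realize time. A minimal sketch, assuming a consumer of this shape in accel/ (not shown in this diff):

    /* Sketch: how an accelerator might apply the realize hook. */
    static bool example_accel_cpu_realize(AccelCPUClass *acc, CPUState *cpu,
                                          Error **errp)
    {
        if (acc->cpu_target_realize && !acc->cpu_target_realize(cpu, errp)) {
            return false;               /* realize failed, @errp is set */
        }
        return true;
    }
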
diff --git a/include/accel/tcg/cpu-ldst-common.h b/include/accel/tcg/cpu-ldst-common.h
new file mode 100644
index 0000000..8bf17c2
--- /dev/null
+++ b/include/accel/tcg/cpu-ldst-common.h
@@ -0,0 +1,122 @@
+/*
+ * Software MMU support
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_CPU_LDST_COMMON_H
+#define ACCEL_TCG_CPU_LDST_COMMON_H
+
+#ifndef CONFIG_TCG
+#error Can only include this header with TCG
+#endif
+
+#include "exec/memopidx.h"
+#include "exec/vaddr.h"
+#include "exec/mmu-access-type.h"
+#include "qemu/int128.h"
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra);
+Int128 cpu_ld16_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra);
+
+void cpu_stb_mmu(CPUArchState *env, vaddr ptr, uint8_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stw_mmu(CPUArchState *env, vaddr ptr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stl_mmu(CPUArchState *env, vaddr ptr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stq_mmu(CPUArchState *env, vaddr ptr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
+ MemOpIdx oi, uintptr_t ra);
+
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, vaddr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, vaddr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, vaddr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, vaddr addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, vaddr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, vaddr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, vaddr addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
+TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
+ (CPUArchState *env, vaddr addr, TYPE val, \
+ MemOpIdx oi, uintptr_t retaddr);
+
+#ifdef CONFIG_ATOMIC64
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
+#else
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
+#endif
+
+GEN_ATOMIC_HELPER_ALL(fetch_add)
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
+GEN_ATOMIC_HELPER_ALL(fetch_and)
+GEN_ATOMIC_HELPER_ALL(fetch_or)
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
+
+GEN_ATOMIC_HELPER_ALL(add_fetch)
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
+GEN_ATOMIC_HELPER_ALL(and_fetch)
+GEN_ATOMIC_HELPER_ALL(or_fetch)
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
+
+GEN_ATOMIC_HELPER_ALL(xchg)
+
+#undef GEN_ATOMIC_HELPER_ALL
+#undef GEN_ATOMIC_HELPER
+
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, vaddr addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, vaddr addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
+
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, vaddr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr,
+ MemOpIdx oi, uintptr_t ra);
+
+#endif /* ACCEL_TCG_CPU_LDST_COMMON_H */
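
These _mmu accessors take a full MemOpIdx, so endianness and alignment are encoded in the operand rather than the function name. A sketch of a target helper built on them (the helper name is hypothetical; MO_TEUL and MO_ALIGN come from exec/memop.h, cpu_mmu_index() from accel/tcg/cpu-mmu-index.h, GETPC() from accel/tcg/getpc.h):

    /* Sketch: an aligned, target-endian 32-bit load from a helper. */
    uint32_t helper_example_ldl(CPUArchState *env, vaddr addr)
    {
        int mmu_idx = cpu_mmu_index(env_cpu(env), false);
        MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx);

        return cpu_ldl_mmu(env, addr, oi, GETPC());
    }
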
diff --git a/include/accel/tcg/cpu-ldst.h b/include/accel/tcg/cpu-ldst.h
new file mode 100644
index 0000000..0de7f5e
--- /dev/null
+++ b/include/accel/tcg/cpu-ldst.h
@@ -0,0 +1,505 @@
+/*
+ * Software MMU support (per-target)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+/*
+ * Generate inline load/store functions for all MMU modes (typically
+ * at least _user and _kernel) as well as _data versions, for all data
+ * sizes.
+ *
+ * Used by target op helpers.
+ *
+ * The syntax for the accessors is:
+ *
+ * load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
+ * cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
+ * cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
+ * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
+ *
+ * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
+ * cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
+ * cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
+ * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
+ *
+ * sign is:
+ * (empty): for 32 and 64 bit sizes
+ * u : unsigned
+ * s : signed
+ *
+ * size is:
+ * b: 8 bits
+ * w: 16 bits
+ * l: 32 bits
+ * q: 64 bits
+ *
+ * end is:
+ * (empty): for target native endian, or for 8 bit access
+ * _be: for forced big endian
+ * _le: for forced little endian
+ *
+ * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
+ * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
+ * the index to use; the "data" and "code" suffixes take the index from
+ * cpu_mmu_index().
+ *
+ * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
+ * MemOp including alignment requirements. The alignment will be enforced.
+ */
+#ifndef ACCEL_TCG_CPU_LDST_H
+#define ACCEL_TCG_CPU_LDST_H
+
+#ifndef CONFIG_TCG
+#error Can only include this header with TCG
+#endif
+
+#include "exec/cpu-common.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/cpu-mmu-index.h"
+#include "exec/abi_ptr.h"
+
+#if defined(CONFIG_USER_ONLY)
+#include "user/guest-host.h"
+#endif /* CONFIG_USER_ONLY */
+
+static inline uint32_t
+cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+ return cpu_ldb_mmu(env, addr, oi, ra);
+}
+
+static inline int
+cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra)
+{
+ return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
+}
+
+static inline uint32_t
+cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
+ return cpu_ldw_mmu(env, addr, oi, ra);
+}
+
+static inline int
+cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
+}
+
+static inline uint32_t
+cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
+ return cpu_ldl_mmu(env, addr, oi, ra);
+}
+
+static inline uint64_t
+cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
+ return cpu_ldq_mmu(env, addr, oi, ra);
+}
+
+static inline uint32_t
+cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
+ return cpu_ldw_mmu(env, addr, oi, ra);
+}
+
+static inline int
+cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
+}
+
+static inline uint32_t
+cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
+ return cpu_ldl_mmu(env, addr, oi, ra);
+}
+
+static inline uint64_t
+cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
+ return cpu_ldq_mmu(env, addr, oi, ra);
+}
+
+static inline void
+cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+ cpu_stb_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
+ cpu_stw_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
+ cpu_stl_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
+ cpu_stq_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
+ cpu_stw_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
+ cpu_stl_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
+ cpu_stq_mmu(env, addr, val, oi, ra);
+}
+
+/*--------------------------*/
+
+static inline uint32_t
+cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline int
+cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ return (int8_t)cpu_ldub_data_ra(env, addr, ra);
+}
+
+static inline uint32_t
+cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline int
+cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
+}
+
+static inline uint32_t
+cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline uint64_t
+cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline uint32_t
+cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline int
+cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
+}
+
+static inline uint32_t
+cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline uint64_t
+cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline void
+cpu_stb_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr, uint64_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr, uint64_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+/*--------------------------*/
+
+static inline uint32_t
+cpu_ldub_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_ldub_data_ra(env, addr, 0);
+}
+
+static inline int
+cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
+{
+ return (int8_t)cpu_ldub_data(env, addr);
+}
+
+static inline uint32_t
+cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_lduw_be_data_ra(env, addr, 0);
+}
+
+static inline int
+cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
+{
+ return (int16_t)cpu_lduw_be_data(env, addr);
+}
+
+static inline uint32_t
+cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_ldl_be_data_ra(env, addr, 0);
+}
+
+static inline uint64_t
+cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_ldq_be_data_ra(env, addr, 0);
+}
+
+static inline uint32_t
+cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_lduw_le_data_ra(env, addr, 0);
+}
+
+static inline int
+cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
+{
+ return (int16_t)cpu_lduw_le_data(env, addr);
+}
+
+static inline uint32_t
+cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_ldl_le_data_ra(env, addr, 0);
+}
+
+static inline uint64_t
+cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_ldq_le_data_ra(env, addr, 0);
+}
+
+static inline void
+cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
+{
+ cpu_stb_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
+{
+ cpu_stw_be_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
+{
+ cpu_stl_be_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
+{
+ cpu_stq_be_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
+{
+ cpu_stw_le_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
+{
+ cpu_stl_le_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
+{
+ cpu_stq_le_data_ra(env, addr, val, 0);
+}
+
+#if TARGET_BIG_ENDIAN
+# define cpu_lduw_data cpu_lduw_be_data
+# define cpu_ldsw_data cpu_ldsw_be_data
+# define cpu_ldl_data cpu_ldl_be_data
+# define cpu_ldq_data cpu_ldq_be_data
+# define cpu_lduw_data_ra cpu_lduw_be_data_ra
+# define cpu_ldsw_data_ra cpu_ldsw_be_data_ra
+# define cpu_ldl_data_ra cpu_ldl_be_data_ra
+# define cpu_ldq_data_ra cpu_ldq_be_data_ra
+# define cpu_lduw_mmuidx_ra cpu_lduw_be_mmuidx_ra
+# define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra
+# define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra
+# define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra
+# define cpu_stw_data cpu_stw_be_data
+# define cpu_stl_data cpu_stl_be_data
+# define cpu_stq_data cpu_stq_be_data
+# define cpu_stw_data_ra cpu_stw_be_data_ra
+# define cpu_stl_data_ra cpu_stl_be_data_ra
+# define cpu_stq_data_ra cpu_stq_be_data_ra
+# define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra
+# define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra
+# define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra
+#else
+# define cpu_lduw_data cpu_lduw_le_data
+# define cpu_ldsw_data cpu_ldsw_le_data
+# define cpu_ldl_data cpu_ldl_le_data
+# define cpu_ldq_data cpu_ldq_le_data
+# define cpu_lduw_data_ra cpu_lduw_le_data_ra
+# define cpu_ldsw_data_ra cpu_ldsw_le_data_ra
+# define cpu_ldl_data_ra cpu_ldl_le_data_ra
+# define cpu_ldq_data_ra cpu_ldq_le_data_ra
+# define cpu_lduw_mmuidx_ra cpu_lduw_le_mmuidx_ra
+# define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra
+# define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra
+# define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra
+# define cpu_stw_data cpu_stw_le_data
+# define cpu_stl_data cpu_stl_le_data
+# define cpu_stq_data cpu_stq_le_data
+# define cpu_stw_data_ra cpu_stw_le_data_ra
+# define cpu_stl_data_ra cpu_stl_le_data_ra
+# define cpu_stq_data_ra cpu_stq_le_data_ra
+# define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra
+# define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra
+# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra
+#endif
+
+static inline uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
+{
+ CPUState *cs = env_cpu(env);
+ MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
+ return cpu_ldb_code_mmu(env, addr, oi, 0);
+}
+
+static inline uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
+{
+ CPUState *cs = env_cpu(env);
+ MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
+ return cpu_ldw_code_mmu(env, addr, oi, 0);
+}
+
+static inline uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
+{
+ CPUState *cs = env_cpu(env);
+ MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
+ return cpu_ldl_code_mmu(env, addr, oi, 0);
+}
+
+static inline uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
+{
+ CPUState *cs = env_cpu(env);
+ MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
+ return cpu_ldq_code_mmu(env, addr, oi, 0);
+}
+
+#endif /* ACCEL_TCG_CPU_LDST_H */
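
Putting the naming scheme together, a sketch of a helper that reads a guest 64-bit value as two 32-bit halves with the _data_ra accessors (the little-endian pairing of halves is illustrative; the @ra argument lets a TLB fault unwind to the guest instruction):

    static uint64_t example_read_pair(CPUArchState *env, abi_ptr addr,
                                      uintptr_t ra)
    {
        uint32_t lo = cpu_ldl_data_ra(env, addr, ra);     /* low half  */
        uint32_t hi = cpu_ldl_data_ra(env, addr + 4, ra); /* high half */

        return ((uint64_t)hi << 32) | lo;
    }
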
diff --git a/include/accel/tcg/cpu-mmu-index.h b/include/accel/tcg/cpu-mmu-index.h
new file mode 100644
index 0000000..e681a90
--- /dev/null
+++ b/include/accel/tcg/cpu-mmu-index.h
@@ -0,0 +1,42 @@
+/*
+ * cpu_mmu_index()
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_CPU_MMU_INDEX_H
+#define ACCEL_TCG_CPU_MMU_INDEX_H
+
+#include "hw/core/cpu.h"
+#include "accel/tcg/cpu-ops.h"
+#include "tcg/debug-assert.h"
+#ifdef COMPILING_PER_TARGET
+# ifdef CONFIG_USER_ONLY
+# include "cpu.h"
+# endif
+#endif
+
+/**
+ * cpu_mmu_index:
+ * @cs: The cpu state
+ * @ifetch: True for code access, false for data access.
+ *
+ * Return the core mmu index for the current translation regime.
+ * This function is used by generic TCG code paths.
+ */
+static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+#ifdef COMPILING_PER_TARGET
+# ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+# endif
+#endif
+
+ int ret = cs->cc->tcg_ops->mmu_index(cs, ifetch);
+ tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES);
+ return ret;
+}
+
+#endif /* ACCEL_TCG_CPU_MMU_INDEX_H */
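
The hook this dispatches to is TCGCPUOps::mmu_index (see accel/tcg/cpu-ops.h). A sketch of a target implementation; the mode test and the index numbering are assumptions:

    static int example_mmu_index(CPUState *cs, bool ifetch)
    {
        CPUArchState *env = cpu_env(cs);

        /* Hypothetical regime selection: index 1 for user mode, 0 for
         * kernel mode; real targets derive this from their MMU state. */
        return example_in_user_mode(env) ? 1 : 0;
    }
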
diff --git a/include/accel/tcg/cpu-ops.h b/include/accel/tcg/cpu-ops.h
new file mode 100644
index 0000000..dd8ea30
--- /dev/null
+++ b/include/accel/tcg/cpu-ops.h
@@ -0,0 +1,333 @@
+/*
+ * TCG CPU-specific operations
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef TCG_CPU_OPS_H
+#define TCG_CPU_OPS_H
+
+#include "exec/breakpoint.h"
+#include "exec/hwaddr.h"
+#include "exec/memattrs.h"
+#include "exec/memop.h"
+#include "exec/mmu-access-type.h"
+#include "exec/vaddr.h"
+#include "accel/tcg/tb-cpu-state.h"
+#include "tcg/tcg-mo.h"
+
+struct TCGCPUOps {
+ /**
+ * mttcg_supported: multi-threaded TCG is supported
+ *
+ * Target (TCG frontend) supports:
+ * - atomic instructions
+ * - memory ordering primitives (barriers)
+ */
+ bool mttcg_supported;
+
+ /**
+ * @precise_smc: Stores which modify code within the current TB force
+ * the TB to exit; the next executed instruction will see
+ * the result of the store.
+ */
+ bool precise_smc;
+
+ /**
+ * @guest_default_memory_order: default barrier that is required
+ * for the guest memory ordering.
+ */
+ TCGBar guest_default_memory_order;
+
+ /**
+ * @initialize: Initialize TCG state
+ *
+ * Called when the first CPU is realized.
+ */
+ void (*initialize)(void);
+ /**
+ * @translate_code: Translate guest instructions to TCGOps
+ * @cpu: cpu context
+ * @tb: translation block
+ * @max_insns: max number of instructions to translate
+ * @pc: guest virtual program counter address
+ * @host_pc: host physical program counter address
+ *
+ * This function must be provided by the target, which should create
+ * the target-specific DisasContext, and then invoke translator_loop.
+ */
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
+ /**
+ * @get_tb_cpu_state: Extract CPU state for a TCG #TranslationBlock
+ *
+ * Fill in all data required to select or compile a TranslationBlock.
+ */
+ TCGTBCPUState (*get_tb_cpu_state)(CPUState *cs);
+ /**
+ * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
+ *
+ * This is called when we abandon execution of a TB before starting it,
+ * and must set all parts of the CPU state which the previous TB in the
+ * chain may not have updated.
+ * By default, when this is NULL, a call is made to @set_pc(tb->pc).
+ *
+ * If more state needs to be restored, the target must implement a
+ * function to restore all the state, and register it here.
+ */
+ void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
+ /**
+ * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn
+ *
+ * This is called when we unwind state in the middle of a TB,
+ * usually before raising an exception. Set all parts of the CPU
+ * state which are tracked insn-by-insn in the target-specific
+ * arguments to start_insn, passed as @data.
+ */
+ void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
+ const uint64_t *data);
+
+ /** @cpu_exec_enter: Callback for cpu_exec preparation */
+ void (*cpu_exec_enter)(CPUState *cpu);
+ /** @cpu_exec_exit: Callback for cpu_exec cleanup */
+ void (*cpu_exec_exit)(CPUState *cpu);
+ /** @debug_excp_handler: Callback for handling debug exceptions */
+ void (*debug_excp_handler)(CPUState *cpu);
+
+ /** @mmu_index: Callback for choosing softmmu mmu index */
+ int (*mmu_index)(CPUState *cpu, bool ifetch);
+
+#ifdef CONFIG_USER_ONLY
+ /**
+ * @fake_user_interrupt: Callback for 'fake exception' handling.
+ *
+ * Simulate 'fake exception' which will be handled outside the
+ * cpu execution loop (hack for x86 user mode).
+ */
+ void (*fake_user_interrupt)(CPUState *cpu);
+
+ /**
+ * record_sigsegv:
+ * @cpu: cpu context
+ * @addr: faulting guest address
+ * @access_type: access was read/write/execute
+ * @maperr: true for invalid page, false for permission fault
+ * @ra: host pc for unwinding
+ *
+ * We are about to raise SIGSEGV with si_code set for @maperr,
+ * and si_addr set for @addr. Record anything further needed
+ * for the signal ucontext_t.
+ *
+ * If the emulated kernel does not provide the signal handler with
+ * anything besides the user context registers and the siginfo_t,
+ * then this hook need do nothing and may be omitted.
+ * Otherwise, record the data and return; the caller will raise
+ * the signal, unwind the cpu state, and return to the main loop.
+ *
+ * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided
+ * so that a "normal" cpu exception can be raised. In this case,
+ * the signal must be raised by the architecture cpu_loop.
+ */
+ void (*record_sigsegv)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+ /**
+ * record_sigbus:
+ * @cpu: cpu context
+ * @addr: misaligned guest address
+ * @access_type: access was read/write/execute
+ * @ra: host pc for unwinding
+ *
+ * We are about to raise SIGBUS with si_code BUS_ADRALN,
+ * and si_addr set for @addr. Record anything further needed
+ * for the signal ucontext_t.
+ *
+ * If the emulated kernel does not provide the signal handler with
+ * anything besides the user context registers, and the siginfo_t,
+ * then this hook need do nothing and may be omitted.
+ * Otherwise, record the data and return; the caller will raise
+ * the signal, unwind the cpu state, and return to the main loop.
+ *
+ * If it is simpler to re-use the sysemu do_unaligned_access code,
+ * @ra is provided so that a "normal" cpu exception can be raised.
+ * In this case, the signal must be raised by the architecture cpu_loop.
+ */
+ void (*record_sigbus)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, uintptr_t ra);
+
+ /**
+ * untagged_addr: Remove an ignored tag from an address
+ * @cs: cpu context
+ * @addr: tagged guest address
+ */
+ vaddr (*untagged_addr)(CPUState *cs, vaddr addr);
+#else
+ /** @do_interrupt: Callback for interrupt handling. */
+ void (*do_interrupt)(CPUState *cpu);
+ /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
+ bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
+ /** @cpu_exec_reset: Callback for reset in cpu_exec. */
+ void (*cpu_exec_reset)(CPUState *cpu);
+ /**
+ * @cpu_exec_halt: Callback for handling halt in cpu_exec.
+ *
+ * The target CPU should do any special processing here that it needs
+ * to do when the CPU is in the halted state.
+ *
+ * Return true to indicate that the CPU should now leave halt, false
+ * if it should remain in the halted state. (This should generally
+ * be the same value that cpu_has_work() would return.)
+ *
+ * This method must be provided. If the target does not need to
+ * do anything special for halt, the same function used for its
+ * SysemuCPUOps::has_work method can be used here, as they have the
+ * same function signature.
+ */
+ bool (*cpu_exec_halt)(CPUState *cpu);
+ /**
+ * @tlb_fill_align: Handle a softmmu tlb miss
+ * @cpu: cpu context
+ * @out: output page properties
+ * @addr: virtual address
+ * @access_type: read, write or execute
+ * @mmu_idx: mmu context
+ * @memop: memory operation for the access
+ * @size: memory access size, or 0 for whole page
+ * @probe: test only, no fault
+ * @ra: host return address for exception unwind
+ *
+ * If the access is valid, fill in @out and return true.
+ * Otherwise if probe is true, return false.
+ * Otherwise raise an exception and do not return.
+ *
+ * The alignment check for the access is deferred to this hook,
+ * so that the target can determine the priority of any alignment
+ * fault with respect to other potential faults from paging.
+ * Zero may be passed for @memop to skip any alignment check
+ * for non-memory-access operations such as probing.
+ */
+ bool (*tlb_fill_align)(CPUState *cpu, CPUTLBEntryFull *out, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ MemOp memop, int size, bool probe, uintptr_t ra);
+ /**
+ * @tlb_fill: Handle a softmmu tlb miss
+ *
+ * If the access is valid, call tlb_set_page and return true;
+ * if the access is invalid and probe is true, return false;
+ * otherwise raise an exception and do not return.
+ */
+ bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+ /**
+ * @pointer_wrap:
+ *
+ * We have incremented @base to @result, resulting in a page change.
+ * For the current cpu state, adjust @result for possible overflow.
+ */
+ vaddr (*pointer_wrap)(CPUState *cpu, int mmu_idx, vaddr result, vaddr base);
+ /**
+ * @do_transaction_failed: Callback for handling failed memory transactions
+ * (i.e. bus faults or external aborts; not MMU faults)
+ */
+ void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+ /**
+ * @do_unaligned_access: Callback for unaligned access handling
+ * The callback must exit via raising an exception.
+ */
+ G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+
+ /**
+ * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
+ */
+ vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
+
+ /**
+ * @debug_check_watchpoint: return true if the architectural
+ * watchpoint whose address has matched should really fire, used by ARM
+ * and RISC-V
+ */
+ bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
+
+ /**
+ * @debug_check_breakpoint: return true if the architectural
+ * breakpoint whose PC has matched should really fire.
+ */
+ bool (*debug_check_breakpoint)(CPUState *cpu);
+
+ /**
+ * @io_recompile_replay_branch: Callback for cpu_io_recompile.
+ *
+ * The cpu has been stopped, and cpu_restore_state_from_tb has been
+ * called. If the faulting instruction is in a delay slot, and the
+ * target architecture requires re-execution of the branch, then
+ * adjust the cpu state as required and return true.
+ */
+ bool (*io_recompile_replay_branch)(CPUState *cpu,
+ const TranslationBlock *tb);
+ /**
+ * @need_replay_interrupt: Return %true if @interrupt_request
+ * needs to be recorded for replay purposes.
+ */
+ bool (*need_replay_interrupt)(int interrupt_request);
+#endif /* !CONFIG_USER_ONLY */
+};
+
+#if defined(CONFIG_USER_ONLY)
+
+static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+ MemTxAttrs atr, int fl, uintptr_t ra)
+{
+}
+
+static inline int cpu_watchpoint_address_matches(CPUState *cpu,
+ vaddr addr, vaddr len)
+{
+ return 0;
+}
+
+#else
+
+/**
+ * cpu_check_watchpoint:
+ * @cpu: cpu context
+ * @addr: guest virtual address
+ * @len: access length
+ * @attrs: memory access attributes
+ * @flags: watchpoint access type
+ * @ra: unwind return address
+ *
+ * Check for a watchpoint hit in [addr, addr+len) of the type
+ * specified by @flags. Exit via exception with a hit.
+ */
+void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+ MemTxAttrs attrs, int flags, uintptr_t ra);
+
+/**
+ * cpu_watchpoint_address_matches:
+ * @cpu: cpu context
+ * @addr: guest virtual address
+ * @len: access length
+ *
+ * Return the watchpoint flags that apply to [addr, addr+len).
+ * If no watchpoint is registered for the range, the result is 0.
+ */
+int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
+
+/*
+ * Common pointer_wrap implementations.
+ */
+vaddr cpu_pointer_wrap_notreached(CPUState *, int, vaddr, vaddr);
+vaddr cpu_pointer_wrap_uint32(CPUState *, int, vaddr, vaddr);
+
+#endif
+
+#endif /* TCG_CPU_OPS_H */
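
To see how the pieces fit, a sketch of the singleton a target might export; the hook function names are assumptions, and which fields are mandatory depends on CONFIG_USER_ONLY as laid out above:

    static const TCGCPUOps example_tcg_ops = {
        .mttcg_supported = true,
        .guest_default_memory_order = 0,     /* no implicit guest ordering */
        .initialize = example_translate_init,          /* hypothetical */
        .translate_code = example_translate_code,      /* hypothetical */
        .get_tb_cpu_state = example_get_tb_cpu_state,  /* hypothetical */
        .restore_state_to_opc = example_restore_state_to_opc,
        .mmu_index = example_mmu_index,
        .cpu_exec_halt = example_cpu_has_work,  /* sysemu; reuse has_work */
    };
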
diff --git a/include/accel/tcg/getpc.h b/include/accel/tcg/getpc.h
new file mode 100644
index 0000000..0fc08ad
--- /dev/null
+++ b/include/accel/tcg/getpc.h
@@ -0,0 +1,20 @@
+/*
+ * Get host pc for helper unwinding.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_GETPC_H
+#define ACCEL_TCG_GETPC_H
+
+/* GETPC is the true target of the return instruction that we'll execute. */
+#ifdef CONFIG_TCG_INTERPRETER
+extern __thread uintptr_t tci_tb_ptr;
+# define GETPC() tci_tb_ptr
+#else
+# define GETPC() \
+ ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
+#endif
+
+#endif /* ACCEL_TCG_GETPC_H */
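
One rule worth spelling out: GETPC() must be evaluated in the helper that the generated code calls directly, because __builtin_return_address(0) only sees the immediate caller; pass the value down to any internal functions. A sketch with a hypothetical internal helper:

    uint64_t helper_example(CPUArchState *env, vaddr addr)
    {
        uintptr_t ra = GETPC();     /* capture before any nested calls */

        return example_do_access(env, addr, ra);    /* hypothetical */
    }
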
diff --git a/include/accel/tcg/helper-retaddr.h b/include/accel/tcg/helper-retaddr.h
new file mode 100644
index 0000000..037fda2
--- /dev/null
+++ b/include/accel/tcg/helper-retaddr.h
@@ -0,0 +1,43 @@
+/*
+ * Get user helper pc for memory unwinding.
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_HELPER_RETADDR_H
+#define ACCEL_TCG_HELPER_RETADDR_H
+
+/*
+ * For user-only, helpers that use guest to host address translation
+ * must protect the actual host memory access by recording 'retaddr'
+ * for the signal handler. This is required for a race condition in
+ * which another thread unmaps the page between a probe and the
+ * actual access.
+ */
+#ifdef CONFIG_USER_ONLY
+extern __thread uintptr_t helper_retaddr;
+
+static inline void set_helper_retaddr(uintptr_t ra)
+{
+ helper_retaddr = ra;
+ /*
+ * Ensure that this write is visible to the SIGSEGV handler that
+ * may be invoked due to a subsequent invalid memory operation.
+ */
+ signal_barrier();
+}
+
+static inline void clear_helper_retaddr(void)
+{
+ /*
+ * Ensure that previous memory operations have succeeded before
+ * removing the data visible to the signal handler.
+ */
+ signal_barrier();
+ helper_retaddr = 0;
+}
+#else
+#define set_helper_retaddr(ra) do { } while (0)
+#define clear_helper_retaddr() do { } while (0)
+#endif
+
+#endif /* ACCEL_TCG_HELPER_RETADDR_H */
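
A sketch of the access pattern this protects, following the comment above; it assumes @host was obtained from a prior guest-to-host translation and @ra from GETPC():

    static uint32_t example_user_load(const void *host, uintptr_t ra)
    {
        uint32_t val;

        set_helper_retaddr(ra);             /* visible to SIGSEGV handler */
        val = *(const uint32_t *)host;      /* the raw host access */
        clear_helper_retaddr();
        return val;
    }
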
diff --git a/include/accel/tcg/iommu.h b/include/accel/tcg/iommu.h
new file mode 100644
index 0000000..90cfd6c
--- /dev/null
+++ b/include/accel/tcg/iommu.h
@@ -0,0 +1,41 @@
+/*
+ * TCG IOMMU translations.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+#ifndef ACCEL_TCG_IOMMU_H
+#define ACCEL_TCG_IOMMU_H
+
+#ifdef CONFIG_USER_ONLY
+#error Cannot include accel/tcg/iommu.h from user emulation
+#endif
+
+#include "exec/hwaddr.h"
+#include "exec/memattrs.h"
+
+/**
+ * iotlb_to_section:
+ * @cpu: CPU performing the access
+ * @index: TCG CPU IOTLB entry
+ * @attrs: memory transaction attributes
+ *
+ * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
+ * it refers to. @index will have been initially created and returned
+ * by memory_region_section_get_iotlb().
+ */
+MemoryRegionSection *iotlb_to_section(CPUState *cpu,
+ hwaddr index, MemTxAttrs attrs);
+
+MemoryRegionSection *address_space_translate_for_iotlb(CPUState *cpu,
+ int asidx,
+ hwaddr addr,
+ hwaddr *xlat,
+ hwaddr *plen,
+ MemTxAttrs attrs,
+ int *prot);
+
+hwaddr memory_region_section_get_iotlb(CPUState *cpu,
+ MemoryRegionSection *section);
+
+#endif /* ACCEL_TCG_IOMMU_H */
+
diff --git a/include/accel/tcg/probe.h b/include/accel/tcg/probe.h
new file mode 100644
index 0000000..dd9ecbb
--- /dev/null
+++ b/include/accel/tcg/probe.h
@@ -0,0 +1,122 @@
+/*
+ * Probe guest virtual addresses for access permissions.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+#ifndef ACCEL_TCG_PROBE_H
+#define ACCEL_TCG_PROBE_H
+
+#include "exec/mmu-access-type.h"
+#include "exec/vaddr.h"
+
+/**
+ * probe_access:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @size: size of the access
+ * @access_type: read, write or execute permission
+ * @mmu_idx: MMU index to use for lookup
+ * @retaddr: return address for unwinding
+ *
+ * Look up the guest virtual address @addr. Raise an exception if the
+ * page does not satisfy @access_type. Raise an exception if the
+ * access (@addr, @size) hits a watchpoint. For writes, mark a clean
+ * page as dirty.
+ *
+ * Finally, return the host address for a page that is backed by RAM,
+ * or NULL if the page requires I/O.
+ */
+void *probe_access(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
+
+static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
+ int mmu_idx, uintptr_t retaddr)
+{
+ return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
+}
+
+static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
+ int mmu_idx, uintptr_t retaddr)
+{
+ return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+}
+
+/**
+ * probe_access_flags:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @size: size of the access
+ * @access_type: read, write or execute permission
+ * @mmu_idx: MMU index to use for lookup
+ * @nonfault: suppress the fault
+ * @phost: return value for host address
+ * @retaddr: return address for unwinding
+ *
+ * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
+ * the page, and storing the host address for RAM in @phost.
+ *
+ * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
+ * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
+ * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
+ * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
+ */
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, uintptr_t retaddr);
+
+#ifndef CONFIG_USER_ONLY
+
+/**
+ * probe_access_full:
+ * Like probe_access_flags, except also return into @pfull.
+ *
+ * The CPUTLBEntryFull structure returned via @pfull is transient
+ * and must be consumed or copied immediately, before any further
+ * access or changes to TLB @mmu_idx.
+ *
+ * This function will not fault if @nonfault is set, but will
+ * return TLB_INVALID_MASK if the page is not mapped, or is not
+ * accessible with @access_type.
+ *
+ * This function will return TLB_MMIO in order to force the access
+ * to be handled out-of-line if plugins wish to instrument the access.
+ */
+int probe_access_full(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost,
+ CPUTLBEntryFull **pfull, uintptr_t retaddr);
+
+/**
+ * probe_access_full_mmu:
+ * Like probe_access_full, except:
+ *
+ * This function is intended to be used for page table accesses by
+ * the target mmu itself. Since such page walking happens while
+ * handling another potential mmu fault, this function never raises
+ * exceptions (akin to @nonfault true for probe_access_full).
+ * Likewise this function does not trigger plugin instrumentation.
+ */
+int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ void **phost, CPUTLBEntryFull **pfull);
+
+#endif /* !CONFIG_USER_ONLY */
+
+/**
+ * tlb_vaddr_to_host:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @mmu_idx: MMU index to use for lookup
+ *
+ * Look up the specified guest virtual address in the TCG softmmu TLB.
+ * If we can translate a host virtual address suitable for direct RAM
+ * access, without causing a guest exception, then return it.
+ * Otherwise (TLB entry is for an I/O access, guest software
+ * TLB fill required, etc) return NULL.
+ */
+void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
+ MMUAccessType access_type, int mmu_idx);
+
+#endif /* ACCEL_TCG_PROBE_H */
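
A sketch of the common fast-path test built on probe_access_flags; the TLB_* bits are the real softmmu TLB flag masks, while the decision logic here is illustrative:

    static bool example_can_access_directly(CPUArchState *env, vaddr addr,
                                            int size, int mmu_idx,
                                            uintptr_t ra, void **phost)
    {
        int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD,
                                       mmu_idx, true /* nonfault */,
                                       phost, ra);

        /* Unmapped (TLB_INVALID_MASK) or not plain RAM (TLB_MMIO):
         * the caller must take a slow path instead of using *phost. */
        return (flags & (TLB_INVALID_MASK | TLB_MMIO)) == 0;
    }
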
diff --git a/include/accel/tcg/tb-cpu-state.h b/include/accel/tcg/tb-cpu-state.h
new file mode 100644
index 0000000..8f91290
--- /dev/null
+++ b/include/accel/tcg/tb-cpu-state.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Definition of TCGTBCPUState.
+ */
+
+#ifndef EXEC_TB_CPU_STATE_H
+#define EXEC_TB_CPU_STATE_H
+
+#include "exec/vaddr.h"
+
+typedef struct TCGTBCPUState {
+ vaddr pc;
+ uint32_t flags;
+ uint32_t cflags;
+ uint64_t cs_base;
+} TCGTBCPUState;
+
+#endif /* EXEC_TB_CPU_STATE_H */
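
A sketch of the TCGCPUOps::get_tb_cpu_state hook that produces this struct; the env fields and the flag computation are assumptions, and cflags is left zeroed here on the assumption that the common executor fills it in:

    static TCGTBCPUState example_get_tb_cpu_state(CPUState *cs)
    {
        CPUArchState *env = cpu_env(cs);

        return (TCGTBCPUState){
            .pc = env->pc,                           /* hypothetical field */
            .flags = example_compute_tb_flags(env),  /* hypothetical */
            .cs_base = 0,
        };
    }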