author     Claudio Fontana <cfontana@suse.de>                2021-02-04 17:39:23 +0100
committer  Richard Henderson <richard.henderson@linaro.org>  2021-02-05 10:24:15 -1000
commit     78271684719f34c1cc19f895e089f2f19b69698d
tree       5f47406eb8c2be4e37e411e5053678e4d91e09d3  /accel/tcg/cputlb.c
parent     c73bdb35a91fb6b17c2c93b1ba381fc88a406f8d
cpu: tcg_ops: move to tcg-cpu-ops.h, keep a pointer in CPUClass
We cannot, in principle, make the TCG operations field definitions conditional on CONFIG_TCG in code that is included by both common_ss and specific_ss modules.

Therefore, to safely restrict the TCG fields to TCG-only builds, move all TCG CPU operations into a separate header file, which is included only by TCG, target-specific code. This leaves just a NULL pointer in cpu.h for non-TCG builds.

This also tidies up the code in all targets a bit, since all TCG CPU operations are now neatly contained in a dedicated data struct.

Signed-off-by: Claudio Fontana <cfontana@suse.de>
Message-Id: <20210204163931.7358-16-cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
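For orientation, here is a minimal sketch of the layout this commit establishes. It is simplified from the real headers: it shows only the three hooks exercised by the cputlb.c diff below (tlb_fill, do_unaligned_access, do_transaction_failed) and elides every other field.

/* hw/core/tcg-cpu-ops.h -- simplified sketch, not the verbatim header */
struct TCGCPUOps {
    /* Fill the TLB on a guest miss, or raise an exception; may only
     * return false when probe is true. */
    bool (*tlb_fill)(CPUState *cpu, vaddr addr, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr);
    /* Handle an unaligned guest access that the target forbids. */
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr);
    /* Handle a failed bus transaction; optional, may be NULL. */
    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr,
                                  vaddr addr, unsigned size,
                                  MMUAccessType access_type,
                                  int mmu_idx, MemTxAttrs attrs,
                                  MemTxResult response, uintptr_t retaddr);
    /* ... further hooks elided ... */
};

/* include/hw/core/cpu.h -- CPUClass keeps only an opaque pointer, so
 * common code builds without CONFIG_TCG and non-TCG builds leave it NULL. */
struct CPUClass {
    /* ... */
    const struct TCGCPUOps *tcg_ops;
    /* ... */
};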
Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r--  accel/tcg/cputlb.c  35
1 file changed, 31 insertions(+), 4 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index b771780..8a7b779 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
@@ -1305,11 +1306,37 @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
* This is not a probe, so only valid return is success; failure
* should result in exception + longjmp to the cpu loop.
*/
- ok = cc->tcg_ops.tlb_fill(cpu, addr, size,
- access_type, mmu_idx, false, retaddr);
+ ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
+ access_type, mmu_idx, false, retaddr);
assert(ok);
}

+static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
+}
+
+static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response,
+ uintptr_t retaddr)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ if (!cpu->ignore_memory_transaction_failures &&
+ cc->tcg_ops->do_transaction_failed) {
+ cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
+ access_type, mmu_idx, attrs,
+ response, retaddr);
+ }
+}
+
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, target_ulong addr, uintptr_t retaddr,
MMUAccessType access_type, MemOp op)
@@ -1577,8 +1604,8 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
CPUState *cs = env_cpu(env);
CPUClass *cc = CPU_GET_CLASS(cs);

- if (!cc->tcg_ops.tlb_fill(cs, addr, fault_size, access_type,
- mmu_idx, nonfault, retaddr)) {
+ if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
+ mmu_idx, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
return TLB_INVALID_MASK;
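The counterpart on the target side (changed in the other files of this commit, not shown in this diff) is a single struct of hooks per CPU class, registered at class_init time. A hedged sketch of that pattern follows, using a hypothetical "foo" target; foo_cpu_tlb_fill and foo_cpu_do_unaligned_access are placeholder names, not real QEMU symbols.

/* target/foo/cpu.c -- hypothetical target, illustrating the pattern only */
#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps foo_tcg_ops = {
    .tlb_fill = foo_cpu_tlb_fill,
    .do_unaligned_access = foo_cpu_do_unaligned_access,
    /* .do_transaction_failed stays NULL: cpu_transaction_failed()
     * above checks the pointer before calling through it. */
};

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);
    /* ... other class setup ... */
#ifdef CONFIG_TCG
    cc->tcg_ops = &foo_tcg_ops;
#endif
}

Hiding the hooks behind a pointer rather than embedded fields costs one extra indirection per call, but it lets cpu.h stay free of CONFIG_TCG conditionals, and optional hooks reduce to a simple NULL check, as cpu_transaction_failed() above shows.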