Diffstat (limited to 'target')
-rw-r--r--  target/alpha/cpu.c | 5
-rw-r--r--  target/alpha/cpu.h | 5
-rw-r--r--  target/alpha/helper.c | 24
-rw-r--r--  target/alpha/mem_helper.c | 16
-rw-r--r--  target/arm/cpu.c | 22
-rw-r--r--  target/arm/helper.c | 90
-rw-r--r--  target/arm/internals.h | 10
-rw-r--r--  target/arm/op_helper.c | 29
-rw-r--r--  target/arm/sve_helper.c | 6
-rw-r--r--  target/cris/cpu.c | 5
-rw-r--r--  target/cris/cpu.h | 5
-rw-r--r--  target/cris/helper.c | 61
-rw-r--r--  target/cris/op_helper.c | 28
-rw-r--r--  target/hppa/cpu.c | 5
-rw-r--r--  target/hppa/cpu.h | 8
-rw-r--r--  target/hppa/mem_helper.c | 16
-rw-r--r--  target/i386/cpu.c | 5
-rw-r--r--  target/i386/cpu.h | 5
-rw-r--r--  target/i386/excp_helper.c | 53
-rw-r--r--  target/i386/mem_helper.c | 21
-rw-r--r--  target/lm32/cpu.c | 5
-rw-r--r--  target/lm32/cpu.h | 5
-rw-r--r--  target/lm32/helper.c | 8
-rw-r--r--  target/lm32/op_helper.c | 16
-rw-r--r--  target/m68k/cpu.c | 2
-rw-r--r--  target/m68k/cpu.h | 5
-rw-r--r--  target/m68k/helper.c | 87
-rw-r--r--  target/m68k/op_helper.c | 15
-rw-r--r--  target/microblaze/cpu.c | 5
-rw-r--r--  target/microblaze/cpu.h | 5
-rw-r--r--  target/microblaze/helper.c | 101
-rw-r--r--  target/microblaze/op_helper.c | 19
-rw-r--r--  target/mips/cpu.c | 5
-rw-r--r--  target/mips/helper.c | 79
-rw-r--r--  target/mips/internal.h | 5
-rw-r--r--  target/mips/op_helper.c | 15
-rw-r--r--  target/moxie/cpu.c | 5
-rw-r--r--  target/moxie/cpu.h | 5
-rw-r--r--  target/moxie/helper.c | 65
-rw-r--r--  target/nios2/cpu.c | 5
-rw-r--r--  target/nios2/cpu.h | 5
-rw-r--r--  target/nios2/helper.c | 170
-rw-r--r--  target/nios2/mmu.c | 12
-rw-r--r--  target/openrisc/cpu.c | 5
-rw-r--r--  target/openrisc/cpu.h | 5
-rw-r--r--  target/openrisc/mmu.c | 69
-rw-r--r--  target/ppc/cpu.h | 7
-rw-r--r--  target/ppc/mmu_helper.c | 16
-rw-r--r--  target/ppc/translate_init.inc.c | 5
-rw-r--r--  target/ppc/user_only_helper.c | 14
-rw-r--r--  target/riscv/cpu.c | 5
-rw-r--r--  target/riscv/cpu.h | 5
-rw-r--r--  target/riscv/cpu_helper.c | 50
-rw-r--r--  target/s390x/cpu.c | 5
-rw-r--r--  target/s390x/excp_helper.c | 67
-rw-r--r--  target/s390x/internal.h | 5
-rw-r--r--  target/s390x/mem_helper.c | 16
-rw-r--r--  target/sh4/cpu.c | 5
-rw-r--r--  target/sh4/cpu.h | 5
-rw-r--r--  target/sh4/helper.c | 189
-rw-r--r--  target/sh4/op_helper.c | 12
-rw-r--r--  target/sparc/cpu.c | 5
-rw-r--r--  target/sparc/cpu.h | 5
-rw-r--r--  target/sparc/ldst_helper.c | 15
-rw-r--r--  target/sparc/mmu_helper.c | 58
-rw-r--r--  target/tilegx/cpu.c | 10
-rw-r--r--  target/tricore/cpu.c | 1
-rw-r--r--  target/tricore/cpu.h | 6
-rw-r--r--  target/tricore/helper.c | 23
-rw-r--r--  target/tricore/op_helper.c | 26
-rw-r--r--  target/unicore32/cpu.c | 5
-rw-r--r--  target/unicore32/cpu.h | 5
-rw-r--r--  target/unicore32/helper.c | 23
-rw-r--r--  target/unicore32/op_helper.c | 14
-rw-r--r--  target/unicore32/softmmu.c | 13
-rw-r--r--  target/xtensa/cpu.c | 5
-rw-r--r--  target/xtensa/cpu.h | 5
-rw-r--r--  target/xtensa/helper.c | 33
78 files changed, 758 insertions, 1042 deletions
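
The pattern is the same in every target below: the old int-returning CPUClass::handle_mmu_fault hook and the per-target tlb_fill() wrappers are removed, and a CPUClass::tlb_fill hook takes their place. Distilled from those conversions, the contract the new hooks follow looks roughly like this sketch (example_cpu_tlb_fill, example_translate and EXAMPLE_EXCP_MMU are placeholder names, not part of the patch):

#include "qemu/osdep.h"
#include "exec/exec-all.h"

/*
 * Sketch only: the success/probe/fault contract implemented by the
 * per-target hooks below.  The names are hypothetical placeholders.
 */
bool example_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          bool probe, uintptr_t retaddr)
{
    hwaddr phys;
    int prot;

    if (example_translate(cs, addr, access_type, mmu_idx, &phys, &prot)) {
        /* Success: install the mapping in the QEMU TLB and report it. */
        tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        /* A probing caller wants a quiet failure, not an exception. */
        return false;
    }
    /* Record target-specific fault state, then unwind to the cpu loop. */
    cs->exception_index = EXAMPLE_EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}
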
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index ad3588a..7c81be4 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -225,9 +225,8 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = alpha_cpu_set_pc;
cc->gdb_read_register = alpha_cpu_gdb_read_register;
cc->gdb_write_register = alpha_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = alpha_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = alpha_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = alpha_cpu_do_transaction_failed;
cc->do_unaligned_access = alpha_cpu_do_unaligned_access;
cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index 63bf361..cf09112 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -475,8 +475,9 @@ void alpha_cpu_list(void);
is returned if the signal was handled by the virtual CPU. */
int cpu_alpha_signal_handler(int host_signum, void *pinfo,
void *puc);
-int alpha_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int);
void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);
diff --git a/target/alpha/helper.c b/target/alpha/helper.c
index 7201576..5fe9c87 100644
--- a/target/alpha/helper.c
+++ b/target/alpha/helper.c
@@ -104,14 +104,15 @@ void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
}
#if defined(CONFIG_USER_ONLY)
-int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
AlphaCPU *cpu = ALPHA_CPU(cs);
cs->exception_index = EXCP_MMFAULT;
cpu->env.trap_arg0 = address;
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else
/* Returns the OSF/1 entMM failure indication, or -1 on success. */
@@ -248,26 +249,31 @@ hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return (fail >= 0 ? -1 : phys);
}
-int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size, int rw,
- int mmu_idx)
+bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
AlphaCPU *cpu = ALPHA_CPU(cs);
CPUAlphaState *env = &cpu->env;
target_ulong phys;
int prot, fail;
- fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
+ fail = get_physical_address(env, addr, 1 << access_type,
+ mmu_idx, &phys, &prot);
if (unlikely(fail >= 0)) {
+ if (probe) {
+ return false;
+ }
cs->exception_index = EXCP_MMFAULT;
env->trap_arg0 = addr;
env->trap_arg1 = fail;
- env->trap_arg2 = (rw == 2 ? -1 : rw);
- return 1;
+ env->trap_arg2 = (access_type == MMU_INST_FETCH ? -1 : access_type);
+ cpu_loop_exit_restore(cs, retaddr);
}
tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
- return 0;
+ return true;
}
#endif /* USER_ONLY */
diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c
index 011bc73..934faa1 100644
--- a/target/alpha/mem_helper.c
+++ b/target/alpha/mem_helper.c
@@ -62,20 +62,4 @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
env->error_code = 0;
cpu_loop_exit_restore(cs, retaddr);
}
-
-/* try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = alpha_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret != 0)) {
- /* Exception index and error code are already set */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
#endif /* CONFIG_USER_ONLY */
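
As in alpha/mem_helper.c above, each target deletes its local tlb_fill() wrapper; the class hook is expected to be invoked from the common softmmu code instead. That caller is not part of this section, so the following is only an assumed sketch of its shape (the accel/tcg location and the assert are assumptions, not shown in this diff):

/* Assumed shape of the common caller; not part of this diff. */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /* Not a probe: the hook either succeeds or does not return. */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}
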
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index a181fa8..8eee1d8 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2133,23 +2133,6 @@ static Property arm_cpu_properties[] = {
DEFINE_PROP_END_OF_LIST()
};
-#ifdef CONFIG_USER_ONLY
-static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
-
- env->exception.vaddress = address;
- if (rw == 2) {
- cs->exception_index = EXCP_PREFETCH_ABORT;
- } else {
- cs->exception_index = EXCP_DATA_ABORT;
- }
- return 1;
-}
-#endif
-
static gchar *arm_gdb_arch_name(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -2182,9 +2165,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->synchronize_from_tb = arm_cpu_synchronize_from_tb;
cc->gdb_read_register = arm_cpu_gdb_read_register;
cc->gdb_write_register = arm_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->do_interrupt = arm_cpu_do_interrupt;
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
cc->do_transaction_failed = arm_cpu_do_transaction_failed;
@@ -2209,6 +2190,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->disas_set_info = arm_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = arm_translate_init;
+ cc->tlb_fill = arm_cpu_tlb_fill;
#endif
}
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 1e6eb0d..e2d5c8e 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -12596,43 +12596,6 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
}
}
-/* Walk the page table and (if the mapping exists) add the page
- * to the TLB. Return false on success, or true on failure. Populate
- * fsr with ARM DFSR/IFSR fault register format value on failure.
- */
-bool arm_tlb_fill(CPUState *cs, vaddr address,
- MMUAccessType access_type, int mmu_idx,
- ARMMMUFaultInfo *fi)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- hwaddr phys_addr;
- target_ulong page_size;
- int prot;
- int ret;
- MemTxAttrs attrs = {};
-
- ret = get_phys_addr(env, address, access_type,
- core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
- &attrs, &prot, &page_size, fi, NULL);
- if (!ret) {
- /*
- * Map a single [sub]page. Regions smaller than our declared
- * target page size are handled specially, so for those we
- * pass in the exact addresses.
- */
- if (page_size >= TARGET_PAGE_SIZE) {
- phys_addr &= TARGET_PAGE_MASK;
- address &= TARGET_PAGE_MASK;
- }
- tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
- prot, mmu_idx, page_size);
- return 0;
- }
-
- return ret;
-}
-
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
MemTxAttrs *attrs)
{
@@ -13111,6 +13074,59 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
#endif
+bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+
+#ifdef CONFIG_USER_ONLY
+ cpu->env.exception.vaddress = address;
+ if (access_type == MMU_INST_FETCH) {
+ cs->exception_index = EXCP_PREFETCH_ABORT;
+ } else {
+ cs->exception_index = EXCP_DATA_ABORT;
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+#else
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot, ret;
+ MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
+
+ /*
+ * Walk the page table and (if the mapping exists) add the page
+ * to the TLB. On success, return true. Otherwise, if probing,
+ * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
+ * register format, and signal the fault.
+ */
+ ret = get_phys_addr(&cpu->env, address, access_type,
+ core_to_arm_mmu_idx(&cpu->env, mmu_idx),
+ &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
+ if (likely(!ret)) {
+ /*
+ * Map a single [sub]page. Regions smaller than our declared
+ * target page size are handled specially, so for those we
+ * pass in the exact addresses.
+ */
+ if (page_size >= TARGET_PAGE_SIZE) {
+ phys_addr &= TARGET_PAGE_MASK;
+ address &= TARGET_PAGE_MASK;
+ }
+ tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
+ prot, mmu_idx, page_size);
+ return true;
+ } else if (probe) {
+ return false;
+ } else {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+ arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
+ }
+#endif
+}
+
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
/* Implement DC ZVA, which zeroes a fixed-length block of memory.
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 587a1dd..5a02f45 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -761,10 +761,12 @@ static inline bool arm_extabort_type(MemTxResult result)
return result != MEMTX_DECODE_ERROR;
}
-/* Do a page table walk and add page to TLB if possible */
-bool arm_tlb_fill(CPUState *cpu, vaddr address,
- MMUAccessType access_type, int mmu_idx,
- ARMMMUFaultInfo *fi);
+bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
+ int mmu_idx, ARMMMUFaultInfo *fi) QEMU_NORETURN;
/* Return true if the stage 1 translation regime is using LPAE format page
* tables */
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 8698b4d..8ee15a4 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -126,8 +126,8 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
return syn;
}
-static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
- int mmu_idx, ARMMMUFaultInfo *fi)
+void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
+ int mmu_idx, ARMMMUFaultInfo *fi)
{
CPUARMState *env = &cpu->env;
int target_el;
@@ -179,27 +179,6 @@ static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
raise_exception(env, exc, syn, target_el);
}
-/* try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- bool ret;
- ARMMMUFaultInfo fi = {};
-
- ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
- if (unlikely(ret)) {
- ARMCPU *cpu = ARM_CPU(cs);
-
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr, true);
-
- deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
- }
-}
-
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
@@ -212,7 +191,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
cpu_restore_state(cs, retaddr, true);
fi.type = ARMFault_Alignment;
- deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
+ arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
/* arm_cpu_do_transaction_failed: handle a memory system error response
@@ -233,7 +212,7 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
fi.ea = arm_extabort_type(response);
fi.type = ARMFault_SyncExternal;
- deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
+ arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
#endif /* !defined(CONFIG_USER_ONLY) */
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index bc84725..fd434c6 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -4598,11 +4598,7 @@ static void sve_ldnf1_r(CPUARMState *env, void *vg, const target_ulong addr,
* in the real world, obviously.)
*
* Then there are the annoying special cases with watchpoints...
- *
- * TODO: Add a form of tlb_fill that does not raise an exception,
- * with a form of tlb_vaddr_to_host and a set of loads to match.
- * The non_fault_vaddr_to_host would handle everything, usually,
- * and the loads would handle the iomem path for watchpoints.
+ * TODO: Add a form of non-faulting loads using cc->tlb_fill(probe=true).
*/
host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx);
split = max_for_page(addr, mem_off, mem_max);
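
The TODO above concerns the probe=true case that the new hook makes possible. A hypothetical non-faulting lookup built on it could look like the sketch below (probe_page is an illustrative name, not something this patch adds):

/* Hypothetical helper, illustrating only the probe=true path. */
static bool probe_page(CPUState *cs, vaddr addr,
                       MMUAccessType access_type, int mmu_idx)
{
    CPUClass *cc = CPU_GET_CLASS(cs);

    /* With probe=true, a missing mapping returns false instead of faulting. */
    return cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0);
}
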
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index 75729bf..4e5288a 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -269,9 +269,8 @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = cris_cpu_set_pc;
cc->gdb_read_register = cris_cpu_gdb_read_register;
cc->gdb_write_register = cris_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = cris_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = cris_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = cris_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_cris_cpu;
#endif
diff --git a/target/cris/cpu.h b/target/cris/cpu.h
index 0fbe771..857de79 100644
--- a/target/cris/cpu.h
+++ b/target/cris/cpu.h
@@ -281,8 +281,9 @@ static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
return !!(env->pregs[PR_CCS] & U_FLAG);
}
-int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
/* Support function regs. */
#define SFR_RW_GC_CFG 0][0
diff --git a/target/cris/helper.c b/target/cris/helper.c
index 3939603..b5159b8 100644
--- a/target/cris/helper.c
+++ b/target/cris/helper.c
@@ -24,6 +24,7 @@
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
//#define CRIS_HELPER_DEBUG
@@ -53,15 +54,15 @@ void crisv10_cpu_do_interrupt(CPUState *cs)
cris_cpu_do_interrupt(cs);
}
-int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
CRISCPU *cpu = CRIS_CPU(cs);
cs->exception_index = 0xaa;
cpu->env.pregs[PR_EDA] = address;
- cpu_dump_state(cs, stderr, 0);
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
@@ -76,33 +77,19 @@ static void cris_shift_ccs(CPUCRISState *env)
env->pregs[PR_CCS] = ccs;
}
-int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
CRISCPU *cpu = CRIS_CPU(cs);
CPUCRISState *env = &cpu->env;
struct cris_mmu_result res;
int prot, miss;
- int r = -1;
target_ulong phy;
- qemu_log_mask(CPU_LOG_MMU, "%s addr=%" VADDR_PRIx " pc=%x rw=%x\n",
- __func__, address, env->pc, rw);
miss = cris_mmu_translate(&res, env, address & TARGET_PAGE_MASK,
- rw, mmu_idx, 0);
- if (miss) {
- if (cs->exception_index == EXCP_BUSFAULT) {
- cpu_abort(cs,
- "CRIS: Illegal recursive bus fault."
- "addr=%" VADDR_PRIx " rw=%d\n",
- address, rw);
- }
-
- env->pregs[PR_EDA] = address;
- cs->exception_index = EXCP_BUSFAULT;
- env->fault_vector = res.bf_vec;
- r = 1;
- } else {
+ access_type, mmu_idx, 0);
+ if (likely(!miss)) {
/*
* Mask off the cache selection bit. The ETRAX busses do not
* see the top bit.
@@ -111,15 +98,29 @@ int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
prot = res.prot;
tlb_set_page(cs, address & TARGET_PAGE_MASK, phy,
prot, mmu_idx, TARGET_PAGE_SIZE);
- r = 0;
+ return true;
+ }
+
+ if (probe) {
+ return false;
}
- if (r > 0) {
- qemu_log_mask(CPU_LOG_MMU,
- "%s returns %d irqreq=%x addr=%" VADDR_PRIx " phy=%x vec=%x"
- " pc=%x\n", __func__, r, cs->interrupt_request, address,
- res.phy, res.bf_vec, env->pc);
+
+ if (cs->exception_index == EXCP_BUSFAULT) {
+ cpu_abort(cs, "CRIS: Illegal recursive bus fault."
+ "addr=%" VADDR_PRIx " access_type=%d\n",
+ address, access_type);
+ }
+
+ env->pregs[PR_EDA] = address;
+ cs->exception_index = EXCP_BUSFAULT;
+ env->fault_vector = res.bf_vec;
+ if (retaddr) {
+ if (cpu_restore_state(cs, retaddr, true)) {
+ /* Evaluate flags after retranslation. */
+ helper_top_evaluate_flags(env);
+ }
}
- return r;
+ cpu_loop_exit(cs);
}
void crisv10_cpu_do_interrupt(CPUState *cs)
diff --git a/target/cris/op_helper.c b/target/cris/op_helper.c
index 0ee3a31..26a395b 100644
--- a/target/cris/op_helper.c
+++ b/target/cris/op_helper.c
@@ -37,34 +37,6 @@
#define D_LOG(...) do { } while (0)
#endif
-#if !defined(CONFIG_USER_ONLY)
-/* Try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- CRISCPU *cpu = CRIS_CPU(cs);
- CPUCRISState *env = &cpu->env;
- int ret;
-
- D_LOG("%s pc=%x tpc=%x ra=%p\n", __func__,
- env->pc, env->pregs[PR_EDA], (void *)retaddr);
- ret = cris_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- if (retaddr) {
- /* now we have a real cpu fault */
- if (cpu_restore_state(cs, retaddr, true)) {
- /* Evaluate flags after retranslation. */
- helper_top_evaluate_flags(env);
- }
- }
- cpu_loop_exit(cs);
- }
-}
-
-#endif
-
void helper_raise_exception(CPUCRISState *env, uint32_t index)
{
CPUState *cs = CPU(cris_env_get_cpu(env));
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index e64f485..9717ea1 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -163,9 +163,8 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
cc->synchronize_from_tb = hppa_cpu_synchronize_from_tb;
cc->gdb_read_register = hppa_cpu_gdb_read_register;
cc->gdb_write_register = hppa_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = hppa_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = hppa_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = hppa_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_hppa_cpu;
#endif
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 923346a..c1e0215 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -360,10 +360,10 @@ int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
-#ifdef CONFIG_USER_ONLY
-int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
- int rw, int midx);
-#else
+bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+#ifndef CONFIG_USER_ONLY
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
int type, hwaddr *pphys, int *pprot);
extern const MemoryRegionOps hppa_io_eir_ops;
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index 77fb544..0fd3ac6 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -25,8 +25,9 @@
#include "trace.h"
#ifdef CONFIG_USER_ONLY
-int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
- int size, int rw, int mmu_idx)
+bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
HPPACPU *cpu = HPPA_CPU(cs);
@@ -34,7 +35,7 @@ int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
which would affect si_code. */
cs->exception_index = EXCP_DMP;
cpu->env.cr[CR_IOR] = address;
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
@@ -213,8 +214,9 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return excp == EXCP_DTLB_MISS ? -1 : phys;
}
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType type, int mmu_idx, uintptr_t retaddr)
+bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
@@ -236,6 +238,9 @@ void tlb_fill(CPUState *cs, target_ulong addr, int size,
excp = hppa_get_physical_address(env, addr, mmu_idx,
a_prot, &phys, &prot);
if (unlikely(excp >= 0)) {
+ if (probe) {
+ return false;
+ }
trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
/* Failure. Raise the indicated exception. */
cs->exception_index = excp;
@@ -252,6 +257,7 @@ void tlb_fill(CPUState *cs, target_ulong addr, int size,
/* Success! Store the translation into the QEMU TLB. */
tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
}
/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 722c551..2df56fa 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5915,9 +5915,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = x86_cpu_gdb_write_register;
cc->get_arch_id = x86_cpu_get_arch_id;
cc->get_paging_enabled = x86_cpu_get_paging_enabled;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->asidx_from_attrs = x86_asidx_from_attrs;
cc->get_memory_mapping = x86_cpu_get_memory_mapping;
cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
@@ -5942,6 +5940,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
cc->tcg_initialize = tcg_x86_init;
+ cc->tlb_fill = x86_cpu_tlb_fill;
#endif
cc->disas_set_info = x86_disas_set_info;
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 0128910..fce6660 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1656,8 +1656,9 @@ void host_cpuid(uint32_t function, uint32_t count,
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);
/* helper.c */
-int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr, int size,
- int is_write, int mmu_idx);
+bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
#ifndef CONFIG_USER_ONLY
diff --git a/target/i386/excp_helper.c b/target/i386/excp_helper.c
index 49231f6..fa1ead6 100644
--- a/target/i386/excp_helper.c
+++ b/target/i386/excp_helper.c
@@ -137,26 +137,7 @@ void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr
raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
-#if defined(CONFIG_USER_ONLY)
-int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
- int is_write, int mmu_idx)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
-
- /* user mode only emulation */
- is_write &= 1;
- env->cr[2] = addr;
- env->error_code = (is_write << PG_ERROR_W_BIT);
- env->error_code |= PG_ERROR_U_MASK;
- cs->exception_index = EXCP0E_PAGE;
- env->exception_is_int = 0;
- env->exception_next_eip = -1;
- return 1;
-}
-
-#else
-
+#if !defined(CONFIG_USER_ONLY)
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
int *prot)
{
@@ -365,8 +346,8 @@ static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
* 0 = nothing more to do
* 1 = generate PF fault
*/
-int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
- int is_write1, int mmu_idx)
+static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
+ int is_write1, int mmu_idx)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
@@ -691,3 +672,31 @@ do_check_protect_pse36:
return 1;
}
#endif
+
+bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+#ifdef CONFIG_USER_ONLY
+ /* user mode only emulation */
+ env->cr[2] = addr;
+ env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
+ env->error_code |= PG_ERROR_U_MASK;
+ cs->exception_index = EXCP0E_PAGE;
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+ cpu_loop_exit_restore(cs, retaddr);
+#else
+ env->retaddr = retaddr;
+ if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
+ /* FIXME: On error in get_hphys we have already jumped out. */
+ g_assert(!probe);
+ raise_exception_err_ra(env, cs->exception_index,
+ env->error_code, retaddr);
+ }
+ return true;
+#endif
+}
diff --git a/target/i386/mem_helper.c b/target/i386/mem_helper.c
index 6cc53bc..1885df2 100644
--- a/target/i386/mem_helper.c
+++ b/target/i386/mem_helper.c
@@ -191,24 +191,3 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
raise_exception_ra(env, EXCP05_BOUND, GETPC());
}
}
-
-#if !defined(CONFIG_USER_ONLY)
-/* try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
- int ret;
-
- env->retaddr = retaddr;
- ret = x86_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (ret) {
- raise_exception_err_ra(env, cs->exception_index, env->error_code, retaddr);
- }
-}
-#endif
diff --git a/target/lm32/cpu.c b/target/lm32/cpu.c
index 282da19..57c50c1 100644
--- a/target/lm32/cpu.c
+++ b/target/lm32/cpu.c
@@ -231,9 +231,8 @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = lm32_cpu_set_pc;
cc->gdb_read_register = lm32_cpu_gdb_read_register;
cc->gdb_write_register = lm32_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = lm32_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = lm32_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_lm32_cpu;
#endif
diff --git a/target/lm32/cpu.h b/target/lm32/cpu.h
index 9b1e6c2..d224d44 100644
--- a/target/lm32/cpu.h
+++ b/target/lm32/cpu.h
@@ -261,8 +261,9 @@ bool lm32_cpu_do_semihosting(CPUState *cs);
#define cpu_list lm32_cpu_list
#define cpu_signal_handler cpu_lm32_signal_handler
-int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool lm32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
#include "exec/cpu-all.h"
diff --git a/target/lm32/helper.c b/target/lm32/helper.c
index a039a99..20ea17b 100644
--- a/target/lm32/helper.c
+++ b/target/lm32/helper.c
@@ -25,8 +25,9 @@
#include "exec/semihost.h"
#include "exec/log.h"
-int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool lm32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
LM32CPU *cpu = LM32_CPU(cs);
CPULM32State *env = &cpu->env;
@@ -40,8 +41,7 @@ int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
} else {
tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
}
-
- return 0;
+ return true;
}
hwaddr lm32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
diff --git a/target/lm32/op_helper.c b/target/lm32/op_helper.c
index 234d55e..be12b11 100644
--- a/target/lm32/op_helper.c
+++ b/target/lm32/op_helper.c
@@ -143,21 +143,5 @@ uint32_t HELPER(rcsr_jrx)(CPULM32State *env)
{
return lm32_juart_get_jrx(env->juart_state);
}
-
-/* Try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = lm32_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
#endif
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index 582e3a7..6f441bc 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -269,7 +269,7 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
cc->set_pc = m68k_cpu_set_pc;
cc->gdb_read_register = m68k_cpu_gdb_read_register;
cc->gdb_write_register = m68k_cpu_gdb_write_register;
- cc->handle_mmu_fault = m68k_cpu_handle_mmu_fault;
+ cc->tlb_fill = m68k_cpu_tlb_fill;
#if defined(CONFIG_SOFTMMU)
cc->do_unassigned_access = m68k_cpu_unassigned_access;
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index ad41608..683d3e2 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -542,8 +542,9 @@ static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
return (env->sr & SR_S) == 0 ? 1 : 0;
}
-int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void m68k_cpu_unassigned_access(CPUState *cs, hwaddr addr,
bool is_write, bool is_exec, int is_asi,
unsigned size);
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
index d958a34..9fc9e64 100644
--- a/target/m68k/helper.c
+++ b/target/m68k/helper.c
@@ -353,20 +353,7 @@ void m68k_switch_sp(CPUM68KState *env)
env->current_sp = new_sp;
}
-#if defined(CONFIG_USER_ONLY)
-
-int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
-{
- M68kCPU *cpu = M68K_CPU(cs);
-
- cs->exception_index = EXCP_ACCESS;
- cpu->env.mmu.ar = address;
- return 1;
-}
-
-#else
-
+#if !defined(CONFIG_USER_ONLY)
/* MMU: 68040 only */
static void print_address_zone(uint32_t logical, uint32_t physical,
@@ -795,11 +782,36 @@ hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return phys_addr;
}
-int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+/*
+ * Notify CPU of a pending interrupt. Prioritization and vectoring should
+ * be handled by the interrupt controller. Real hardware only requests
+ * the vector when the interrupt is acknowledged by the CPU. For
+ * simplicity we calculate it when the interrupt is signalled.
+ */
+void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector)
+{
+ CPUState *cs = CPU(cpu);
+ CPUM68KState *env = &cpu->env;
+
+ env->pending_level = level;
+ env->pending_vector = vector;
+ if (level) {
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
+#endif
+
+bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType qemu_access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
+
+#ifndef CONFIG_USER_ONLY
hwaddr physical;
int prot;
int access_type;
@@ -812,32 +824,35 @@ int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
address & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
- return 0;
+ return true;
}
- if (rw == 2) {
+ if (qemu_access_type == MMU_INST_FETCH) {
access_type = ACCESS_CODE;
- rw = 0;
} else {
access_type = ACCESS_DATA;
- if (rw) {
+ if (qemu_access_type == MMU_DATA_STORE) {
access_type |= ACCESS_STORE;
}
}
-
if (mmu_idx != MMU_USER_IDX) {
access_type |= ACCESS_SUPER;
}
ret = get_physical_address(&cpu->env, &physical, &prot,
address, access_type, &page_size);
- if (ret == 0) {
+ if (likely(ret == 0)) {
address &= TARGET_PAGE_MASK;
physical += address & (page_size - 1);
tlb_set_page(cs, address, physical,
prot, mmu_idx, TARGET_PAGE_SIZE);
- return 0;
+ return true;
}
+
+ if (probe) {
+ return false;
+ }
+
/* page fault */
env->mmu.ssw = M68K_ATC_040;
switch (size) {
@@ -862,31 +877,13 @@ int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
if (!(access_type & ACCESS_STORE)) {
env->mmu.ssw |= M68K_RW_040;
}
- env->mmu.ar = address;
- cs->exception_index = EXCP_ACCESS;
- return 1;
-}
-
-/* Notify CPU of a pending interrupt. Prioritization and vectoring should
- be handled by the interrupt controller. Real hardware only requests
- the vector when the interrupt is acknowledged by the CPU. For
- simplicitly we calculate it when the interrupt is signalled. */
-void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector)
-{
- CPUState *cs = CPU(cpu);
- CPUM68KState *env = &cpu->env;
+#endif
- env->pending_level = level;
- env->pending_vector = vector;
- if (level) {
- cpu_interrupt(cs, CPU_INTERRUPT_HARD);
- } else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
- }
+ cs->exception_index = EXCP_ACCESS;
+ env->mmu.ar = address;
+ cpu_loop_exit_restore(cs, retaddr);
}
-#endif
-
uint32_t HELPER(bitrev)(uint32_t x)
{
x = ((x >> 1) & 0x55555555u) | ((x << 1) & 0xaaaaaaaau);
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index 76f4399..d421614 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -36,21 +36,6 @@ static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
#else
-/* Try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = m68k_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-
static void cf_rte(CPUM68KState *env)
{
uint32_t sp;
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 5596cd5..0ea5499 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -304,9 +304,8 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = mb_cpu_set_pc;
cc->gdb_read_register = mb_cpu_gdb_read_register;
cc->gdb_write_register = mb_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = mb_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = mb_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = mb_cpu_transaction_failed;
cc->get_phys_page_debug = mb_cpu_get_phys_page_debug;
#endif
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index f20e796..7a9fb8f 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -374,8 +374,9 @@ static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
return MMU_KERNEL_IDX;
}
-int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
#include "exec/cpu-all.h"
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index 9848e31..ab2ceeb 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -38,73 +38,74 @@ void mb_cpu_do_interrupt(CPUState *cs)
env->regs[14] = env->sregs[SR_PC];
}
-int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
cs->exception_index = 0xaa;
- cpu_dump_state(cs, stderr, 0);
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
-int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUMBState *env = &cpu->env;
+ struct microblaze_mmu_lookup lu;
unsigned int hit;
- int r = 1;
int prot;
- /* Translate if the MMU is available and enabled. */
- if (mmu_idx != MMU_NOMMU_IDX) {
- uint32_t vaddr, paddr;
- struct microblaze_mmu_lookup lu;
-
- hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
- if (hit) {
- vaddr = address & TARGET_PAGE_MASK;
- paddr = lu.paddr + vaddr - lu.vaddr;
-
- qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
- mmu_idx, vaddr, paddr, lu.prot);
- tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
- r = 0;
- } else {
- env->sregs[SR_EAR] = address;
- qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
- mmu_idx, address);
-
- switch (lu.err) {
- case ERR_PROT:
- env->sregs[SR_ESR] = rw == 2 ? 17 : 16;
- env->sregs[SR_ESR] |= (rw == 1) << 10;
- break;
- case ERR_MISS:
- env->sregs[SR_ESR] = rw == 2 ? 19 : 18;
- env->sregs[SR_ESR] |= (rw == 1) << 10;
- break;
- default:
- abort();
- break;
- }
-
- if (cs->exception_index == EXCP_MMU) {
- cpu_abort(cs, "recursive faults\n");
- }
-
- /* TLB miss. */
- cs->exception_index = EXCP_MMU;
- }
- } else {
+ if (mmu_idx == MMU_NOMMU_IDX) {
/* MMU disabled or not available. */
address &= TARGET_PAGE_MASK;
prot = PAGE_BITS;
tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
- r = 0;
+ return true;
}
- return r;
+
+ hit = mmu_translate(&env->mmu, &lu, address, access_type, mmu_idx);
+ if (likely(hit)) {
+ uint32_t vaddr = address & TARGET_PAGE_MASK;
+ uint32_t paddr = lu.paddr + vaddr - lu.vaddr;
+
+ qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
+ mmu_idx, vaddr, paddr, lu.prot);
+ tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ /* TLB miss. */
+ if (probe) {
+ return false;
+ }
+
+ qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
+ mmu_idx, address);
+
+ env->sregs[SR_EAR] = address;
+ switch (lu.err) {
+ case ERR_PROT:
+ env->sregs[SR_ESR] = access_type == MMU_INST_FETCH ? 17 : 16;
+ env->sregs[SR_ESR] |= (access_type == MMU_DATA_STORE) << 10;
+ break;
+ case ERR_MISS:
+ env->sregs[SR_ESR] = access_type == MMU_INST_FETCH ? 19 : 18;
+ env->sregs[SR_ESR] |= (access_type == MMU_DATA_STORE) << 10;
+ break;
+ default:
+ abort();
+ }
+
+ if (cs->exception_index == EXCP_MMU) {
+ cpu_abort(cs, "recursive faults\n");
+ }
+
+ /* TLB miss. */
+ cs->exception_index = EXCP_MMU;
+ cpu_loop_exit_restore(cs, retaddr);
}
void mb_cpu_do_interrupt(CPUState *cs)
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index e23dcfd..b5dbb90 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -28,25 +28,6 @@
#define D(x)
-#if !defined(CONFIG_USER_ONLY)
-
-/* Try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = mb_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-#endif
-
void helper_put(uint32_t id, uint32_t ctrl, uint32_t data)
{
int test = ctrl & STREAM_TEST;
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index e217fb3..a330586 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -197,9 +197,7 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->synchronize_from_tb = mips_cpu_synchronize_from_tb;
cc->gdb_read_register = mips_cpu_gdb_read_register;
cc->gdb_write_register = mips_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = mips_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->do_unassigned_access = mips_cpu_unassigned_access;
cc->do_unaligned_access = mips_cpu_do_unaligned_access;
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
@@ -208,6 +206,7 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->disas_set_info = mips_cpu_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = mips_tcg_init;
+ cc->tlb_fill = mips_cpu_tlb_fill;
#endif
cc->gdb_num_core_regs = 73;
diff --git a/target/mips/helper.c b/target/mips/helper.c
index c44cdca..9799f2e 100644
--- a/target/mips/helper.c
+++ b/target/mips/helper.c
@@ -874,31 +874,25 @@ refill:
#endif
#endif
-int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
hwaddr physical;
int prot;
- int access_type;
+ int mips_access_type;
#endif
- int ret = 0;
-
-#if 0
- log_cpu_state(cs, 0);
-#endif
- qemu_log_mask(CPU_LOG_MMU,
- "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
- __func__, env->active_tc.PC, address, rw, mmu_idx);
+ int ret = TLBRET_BADADDR;
/* data access */
#if !defined(CONFIG_USER_ONLY)
/* XXX: put correct access by using cpu_restore_state() correctly */
- access_type = ACCESS_INT;
- ret = get_physical_address(env, &physical, &prot,
- address, rw, access_type, mmu_idx);
+ mips_access_type = ACCESS_INT;
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, mips_access_type, mmu_idx);
switch (ret) {
case TLBRET_MATCH:
qemu_log_mask(CPU_LOG_MMU,
@@ -915,44 +909,41 @@ int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
- ret = 0;
- } else if (ret < 0)
-#endif
- {
-#if !defined(CONFIG_USER_ONLY)
+ return true;
+ }
#if !defined(TARGET_MIPS64)
- if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
- /*
- * Memory reads during hardware page table walking are performed
- * as if they were kernel-mode load instructions.
- */
- int mode = (env->hflags & MIPS_HFLAG_KSU);
- bool ret_walker;
- env->hflags &= ~MIPS_HFLAG_KSU;
- ret_walker = page_table_walk_refill(env, address, rw, mmu_idx);
- env->hflags |= mode;
- if (ret_walker) {
- ret = get_physical_address(env, &physical, &prot,
- address, rw, access_type, mmu_idx);
- if (ret == TLBRET_MATCH) {
- tlb_set_page(cs, address & TARGET_PAGE_MASK,
- physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
- mmu_idx, TARGET_PAGE_SIZE);
- ret = 0;
- return ret;
- }
+ if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
+ /*
+ * Memory reads during hardware page table walking are performed
+ * as if they were kernel-mode load instructions.
+ */
+ int mode = (env->hflags & MIPS_HFLAG_KSU);
+ bool ret_walker;
+ env->hflags &= ~MIPS_HFLAG_KSU;
+ ret_walker = page_table_walk_refill(env, address, access_type, mmu_idx);
+ env->hflags |= mode;
+ if (ret_walker) {
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, mips_access_type, mmu_idx);
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
}
}
+ }
#endif
-#endif
- raise_mmu_exception(env, address, rw, ret);
- ret = 1;
+ if (probe) {
+ return false;
}
+#endif
- return ret;
+ raise_mmu_exception(env, address, access_type, ret);
+ do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}
-#if !defined(CONFIG_USER_ONLY)
+#ifndef CONFIG_USER_ONLY
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw)
{
hwaddr physical;
diff --git a/target/mips/internal.h b/target/mips/internal.h
index 286e388..b2b41a5 100644
--- a/target/mips/internal.h
+++ b/target/mips/internal.h
@@ -202,8 +202,9 @@ void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);
/* helper.c */
-int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
/* op_helper.c */
uint32_t float_class_s(uint32_t arg, float_status *fst);
diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c
index 0f272a5..6d86912 100644
--- a/target/mips/op_helper.c
+++ b/target/mips/op_helper.c
@@ -2669,21 +2669,6 @@ void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
do_raise_exception_err(env, excp, error_code, retaddr);
}
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = mips_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (ret) {
- MIPSCPU *cpu = MIPS_CPU(cs);
- CPUMIPSState *env = &cpu->env;
-
- do_raise_exception_err(env, cs->exception_index,
- env->error_code, retaddr);
- }
-}
-
void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr,
bool is_write, bool is_exec, int unused,
unsigned size)
diff --git a/target/moxie/cpu.c b/target/moxie/cpu.c
index 46434e6..02b2b47 100644
--- a/target/moxie/cpu.c
+++ b/target/moxie/cpu.c
@@ -112,9 +112,8 @@ static void moxie_cpu_class_init(ObjectClass *oc, void *data)
cc->do_interrupt = moxie_cpu_do_interrupt;
cc->dump_state = moxie_cpu_dump_state;
cc->set_pc = moxie_cpu_set_pc;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = moxie_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = moxie_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = moxie_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_moxie_cpu;
#endif
diff --git a/target/moxie/cpu.h b/target/moxie/cpu.h
index f3b6d83..a63a96b 100644
--- a/target/moxie/cpu.h
+++ b/target/moxie/cpu.h
@@ -139,7 +139,8 @@ static inline void cpu_get_tb_cpu_state(CPUMoxieState *env, target_ulong *pc,
*flags = 0;
}
-int moxie_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
- int rw, int mmu_idx);
+bool moxie_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
#endif /* MOXIE_CPU_H */
diff --git a/target/moxie/helper.c b/target/moxie/helper.c
index 287a452..f5c1d41 100644
--- a/target/moxie/helper.c
+++ b/target/moxie/helper.c
@@ -26,20 +26,6 @@
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-/* Try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = moxie_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-
void helper_raise_exception(CPUMoxieState *env, int ex)
{
CPUState *cs = CPU(moxie_env_get_cpu(env));
@@ -85,53 +71,29 @@ void helper_debug(CPUMoxieState *env)
cpu_loop_exit(cs);
}
-#if defined(CONFIG_USER_ONLY)
-
-void moxie_cpu_do_interrupt(CPUState *cs)
-{
- CPUState *cs = CPU(moxie_env_get_cpu(env));
-
- cs->exception_index = -1;
-}
-
-int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
-{
- MoxieCPU *cpu = MOXIE_CPU(cs);
-
- cs->exception_index = 0xaa;
- cpu->env.debug1 = address;
- cpu_dump_state(cs, stderr, 0);
- return 1;
-}
-
-#else /* !CONFIG_USER_ONLY */
-
-int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool moxie_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
MoxieCPU *cpu = MOXIE_CPU(cs);
CPUMoxieState *env = &cpu->env;
MoxieMMUResult res;
int prot, miss;
- target_ulong phy;
- int r = 1;
address &= TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- miss = moxie_mmu_translate(&res, env, address, rw, mmu_idx);
- if (miss) {
- /* handle the miss. */
- phy = 0;
- cs->exception_index = MOXIE_EX_MMU_MISS;
- } else {
- phy = res.phy;
- r = 0;
+ miss = moxie_mmu_translate(&res, env, address, access_type, mmu_idx);
+ if (likely(!miss)) {
+ tlb_set_page(cs, address, res.phy, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ if (probe) {
+ return false;
}
- tlb_set_page(cs, address, phy, prot, mmu_idx, TARGET_PAGE_SIZE);
- return r;
-}
+ cs->exception_index = MOXIE_EX_MMU_MISS;
+ cpu_loop_exit_restore(cs, retaddr);
+}
void moxie_cpu_do_interrupt(CPUState *cs)
{
@@ -156,4 +118,3 @@ hwaddr moxie_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
return phy;
}
-#endif
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index fbfaa2c..186af49 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -200,9 +200,8 @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
cc->dump_state = nios2_cpu_dump_state;
cc->set_pc = nios2_cpu_set_pc;
cc->disas_set_info = nios2_cpu_disas_set_info;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = nios2_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = nios2_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = nios2_cpu_do_unaligned_access;
cc->get_phys_page_debug = nios2_cpu_get_phys_page_debug;
#endif
diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h
index adc9072..35d3886 100644
--- a/target/nios2/cpu.h
+++ b/target/nios2/cpu.h
@@ -253,8 +253,9 @@ static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch)
MMU_SUPERVISOR_IDX;
}
-int nios2_cpu_handle_mmu_fault(CPUState *env, vaddr address, int size,
- int rw, int mmu_idx);
+bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
static inline int cpu_interrupts_enabled(CPUNios2State *env)
{
diff --git a/target/nios2/helper.c b/target/nios2/helper.c
index e01fc1f..ffb83fc 100644
--- a/target/nios2/helper.c
+++ b/target/nios2/helper.c
@@ -38,15 +38,12 @@ void nios2_cpu_do_interrupt(CPUState *cs)
env->regs[R_EA] = env->regs[R_PC] + 4;
}
-int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
cs->exception_index = 0xaa;
- /* Page 0x1000 is kuser helper */
- if (address < 0x1000 || address >= 0x2000) {
- cpu_dump_state(cs, stderr, 0);
- }
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
@@ -203,89 +200,6 @@ void nios2_cpu_do_interrupt(CPUState *cs)
}
}
-static int cpu_nios2_handle_virtual_page(
- CPUState *cs, target_ulong address, int rw, int mmu_idx)
-{
- Nios2CPU *cpu = NIOS2_CPU(cs);
- CPUNios2State *env = &cpu->env;
- target_ulong vaddr, paddr;
- Nios2MMULookup lu;
- unsigned int hit;
- hit = mmu_translate(env, &lu, address, rw, mmu_idx);
- if (hit) {
- vaddr = address & TARGET_PAGE_MASK;
- paddr = lu.paddr + vaddr - lu.vaddr;
-
- if (((rw == 0) && (lu.prot & PAGE_READ)) ||
- ((rw == 1) && (lu.prot & PAGE_WRITE)) ||
- ((rw == 2) && (lu.prot & PAGE_EXEC))) {
-
- tlb_set_page(cs, vaddr, paddr, lu.prot,
- mmu_idx, TARGET_PAGE_SIZE);
- return 0;
- } else {
- /* Permission violation */
- cs->exception_index = (rw == 0) ? EXCP_TLBR :
- ((rw == 1) ? EXCP_TLBW :
- EXCP_TLBX);
- }
- } else {
- cs->exception_index = EXCP_TLBD;
- }
-
- if (rw == 2) {
- env->regs[CR_TLBMISC] &= ~CR_TLBMISC_D;
- } else {
- env->regs[CR_TLBMISC] |= CR_TLBMISC_D;
- }
- env->regs[CR_PTEADDR] &= CR_PTEADDR_PTBASE_MASK;
- env->regs[CR_PTEADDR] |= (address >> 10) & CR_PTEADDR_VPN_MASK;
- env->mmu.pteaddr_wr = env->regs[CR_PTEADDR];
- env->regs[CR_BADADDR] = address;
- return 1;
-}
-
-int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
-{
- Nios2CPU *cpu = NIOS2_CPU(cs);
- CPUNios2State *env = &cpu->env;
-
- if (cpu->mmu_present) {
- if (MMU_SUPERVISOR_IDX == mmu_idx) {
- if (address >= 0xC0000000) {
- /* Kernel physical page - TLB bypassed */
- address &= TARGET_PAGE_MASK;
- tlb_set_page(cs, address, address, PAGE_BITS,
- mmu_idx, TARGET_PAGE_SIZE);
- } else if (address >= 0x80000000) {
- /* Kernel virtual page */
- return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
- } else {
- /* User virtual page */
- return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
- }
- } else {
- if (address >= 0x80000000) {
- /* Illegal access from user mode */
- cs->exception_index = EXCP_SUPERA;
- env->regs[CR_BADADDR] = address;
- return 1;
- } else {
- /* User virtual page */
- return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
- }
- }
- } else {
- /* No MMU */
- address &= TARGET_PAGE_MASK;
- tlb_set_page(cs, address, address, PAGE_BITS,
- mmu_idx, TARGET_PAGE_SIZE);
- }
-
- return 0;
-}
-
hwaddr nios2_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
Nios2CPU *cpu = NIOS2_CPU(cs);
@@ -321,4 +235,80 @@ void nios2_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
env->regs[CR_EXCEPTION] = EXCP_UNALIGN << 2;
helper_raise_exception(env, EXCP_UNALIGN);
}
+
+bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+ unsigned int excp = EXCP_TLBD;
+ target_ulong vaddr, paddr;
+ Nios2MMULookup lu;
+ unsigned int hit;
+
+ if (!cpu->mmu_present) {
+ /* No MMU */
+ address &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, address, PAGE_BITS,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ if (MMU_SUPERVISOR_IDX == mmu_idx) {
+ if (address >= 0xC0000000) {
+ /* Kernel physical page - TLB bypassed */
+ address &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, address, PAGE_BITS,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ } else {
+ if (address >= 0x80000000) {
+ /* Illegal access from user mode */
+ if (probe) {
+ return false;
+ }
+ cs->exception_index = EXCP_SUPERA;
+ env->regs[CR_BADADDR] = address;
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+ }
+
+ /* Virtual page. */
+ hit = mmu_translate(env, &lu, address, access_type, mmu_idx);
+ if (hit) {
+ vaddr = address & TARGET_PAGE_MASK;
+ paddr = lu.paddr + vaddr - lu.vaddr;
+
+ if (((access_type == MMU_DATA_LOAD) && (lu.prot & PAGE_READ)) ||
+ ((access_type == MMU_DATA_STORE) && (lu.prot & PAGE_WRITE)) ||
+ ((access_type == MMU_INST_FETCH) && (lu.prot & PAGE_EXEC))) {
+ tlb_set_page(cs, vaddr, paddr, lu.prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ /* Permission violation */
+ excp = (access_type == MMU_DATA_LOAD ? EXCP_TLBR :
+ access_type == MMU_DATA_STORE ? EXCP_TLBW : EXCP_TLBX);
+ }
+
+ if (probe) {
+ return false;
+ }
+
+ if (access_type == MMU_INST_FETCH) {
+ env->regs[CR_TLBMISC] &= ~CR_TLBMISC_D;
+ } else {
+ env->regs[CR_TLBMISC] |= CR_TLBMISC_D;
+ }
+ env->regs[CR_PTEADDR] &= CR_PTEADDR_PTBASE_MASK;
+ env->regs[CR_PTEADDR] |= (address >> 10) & CR_PTEADDR_VPN_MASK;
+ env->mmu.pteaddr_wr = env->regs[CR_PTEADDR];
+
+ cs->exception_index = excp;
+ env->regs[CR_BADADDR] = address;
+ cpu_loop_exit_restore(cs, retaddr);
+}
#endif /* !CONFIG_USER_ONLY */
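
The nios2 conversion above illustrates the contract every target adopts in this series: on success, install the translation with tlb_set_page() and return true; if the access was only a probe, return false without touching CPU state; otherwise record the fault and leave through cpu_loop_exit_restore(), which does not return. A minimal sketch of that skeleton for a hypothetical target (every foo_* name below is illustrative, not taken from this patch):

bool foo_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    FooCPU *cpu = FOO_CPU(cs);
    hwaddr physical;
    int prot, ret;

    /* Target-specific translation; assume 0 means success. */
    ret = foo_translate(&cpu->env, &physical, &prot, address,
                        access_type, mmu_idx);
    if (ret == 0) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;        /* entry installed, the access is retried */
    }
    if (probe) {
        return false;       /* caller only asked; leave CPU state alone */
    }
    /* Record the fault and longjmp back to the cpu loop; does not return. */
    cs->exception_index = FOO_EXCP_MMU_FAULT;
    cpu_loop_exit_restore(cs, retaddr);
}
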
diff --git a/target/nios2/mmu.c b/target/nios2/mmu.c
index 5acf442..47fa474 100644
--- a/target/nios2/mmu.c
+++ b/target/nios2/mmu.c
@@ -36,18 +36,6 @@
#define MMU_LOG(x)
#endif
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = nios2_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-
void mmu_read_debug(CPUNios2State *env, uint32_t rn)
{
switch (rn) {
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index d125236..3816bae 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -149,9 +149,8 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = openrisc_cpu_set_pc;
cc->gdb_read_register = openrisc_cpu_gdb_read_register;
cc->gdb_write_register = openrisc_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = openrisc_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = openrisc_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_openrisc_cpu;
#endif
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index 88a8c70..9473d94 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -344,8 +344,9 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int openrisc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void openrisc_translate_init(void);
-int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
- int rw, int mmu_idx);
+bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c
index 5dec68d..a73b12a 100644
--- a/target/openrisc/mmu.c
+++ b/target/openrisc/mmu.c
@@ -107,16 +107,42 @@ static void raise_mmu_exception(OpenRISCCPU *cpu, target_ulong address,
cpu->env.lock_addr = -1;
}
-int openrisc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
-#ifdef CONFIG_USER_ONLY
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
- raise_mmu_exception(cpu, address, EXCP_DPF);
- return 1;
-#else
- g_assert_not_reached();
+ int excp = EXCP_DPF;
+
+#ifndef CONFIG_USER_ONLY
+ int prot;
+ hwaddr phys_addr;
+
+ if (mmu_idx == MMU_NOMMU_IDX) {
+ /* The mmu is disabled; lookups never fail. */
+ get_phys_nommu(&phys_addr, &prot, addr);
+ excp = 0;
+ } else {
+ bool super = mmu_idx == MMU_SUPERVISOR_IDX;
+ int need = (access_type == MMU_INST_FETCH ? PAGE_EXEC
+ : access_type == MMU_DATA_STORE ? PAGE_WRITE
+ : PAGE_READ);
+ excp = get_phys_mmu(cpu, &phys_addr, &prot, addr, need, super);
+ }
+
+ if (likely(excp == 0)) {
+ tlb_set_page(cs, addr & TARGET_PAGE_MASK,
+ phys_addr & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ if (probe) {
+ return false;
+ }
#endif
+
+ raise_mmu_exception(cpu, addr, excp);
+ cpu_loop_exit_restore(cs, retaddr);
}
#ifndef CONFIG_USER_ONLY
@@ -152,33 +178,4 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return phys_addr;
}
}
-
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- OpenRISCCPU *cpu = OPENRISC_CPU(cs);
- int prot, excp;
- hwaddr phys_addr;
-
- if (mmu_idx == MMU_NOMMU_IDX) {
- /* The mmu is disabled; lookups never fail. */
- get_phys_nommu(&phys_addr, &prot, addr);
- excp = 0;
- } else {
- bool super = mmu_idx == MMU_SUPERVISOR_IDX;
- int need = (access_type == MMU_INST_FETCH ? PAGE_EXEC
- : access_type == MMU_DATA_STORE ? PAGE_WRITE
- : PAGE_READ);
- excp = get_phys_mmu(cpu, &phys_addr, &prot, addr, need, super);
- }
-
- if (unlikely(excp)) {
- raise_mmu_exception(cpu, addr, excp);
- cpu_loop_exit_restore(cs, retaddr);
- }
-
- tlb_set_page(cs, addr & TARGET_PAGE_MASK,
- phys_addr & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
-}
#endif
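
With per-target tlb_fill() wrappers such as the ones removed from nios2/mmu.c and openrisc/mmu.c above now gone, the common softmmu code is expected to call the class hook directly. Roughly, and assuming the shared dispatcher sits with the rest of the TLB code in accel/tcg (not shown in this diff):

static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * Not a probe, so the only valid return is success; a failed fill
     * must raise a guest exception and longjmp out via cpu_loop_exit*.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}

The probe=false argument is what lets the per-target implementations above treat the non-probe failure path as noreturn.
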
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 5e7cf54..d7f23ad 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1311,10 +1311,9 @@ void ppc_translate_init(void);
* is returned if the signal was handled by the virtual CPU.
*/
int cpu_ppc_signal_handler(int host_signum, void *pinfo, void *puc);
-#if defined(CONFIG_USER_ONLY)
-int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
-#endif
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
#if !defined(CONFIG_USER_ONLY)
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index 1dbc9ac..e605efa 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -3057,15 +3057,9 @@ void helper_check_tlb_flush_global(CPUPPCState *env)
/*****************************************************************************/
-/*
- * try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- *
- * XXX: fix it to restore all registers
- */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
@@ -3078,7 +3072,11 @@ void tlb_fill(CPUState *cs, target_ulong addr, int size,
ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx);
}
if (unlikely(ret != 0)) {
+ if (probe) {
+ return false;
+ }
raise_exception_err_ra(env, cs->exception_index, env->error_code,
retaddr);
}
+ return true;
}
diff --git a/target/ppc/translate_init.inc.c b/target/ppc/translate_init.inc.c
index 0394a9d..ad5e14b 100644
--- a/target/ppc/translate_init.inc.c
+++ b/target/ppc/translate_init.inc.c
@@ -10592,9 +10592,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_read_register = ppc_cpu_gdb_read_register;
cc->gdb_write_register = ppc_cpu_gdb_write_register;
cc->do_unaligned_access = ppc_cpu_do_unaligned_access;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = ppc_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_ppc_cpu;
#endif
@@ -10624,6 +10622,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
#endif
#ifdef CONFIG_TCG
cc->tcg_initialize = ppc_translate_init;
+ cc->tlb_fill = ppc_cpu_tlb_fill;
#endif
cc->disas_set_info = ppc_disas_set_info;
diff --git a/target/ppc/user_only_helper.c b/target/ppc/user_only_helper.c
index 2f1477f..683c033 100644
--- a/target/ppc/user_only_helper.c
+++ b/target/ppc/user_only_helper.c
@@ -20,21 +20,24 @@
#include "qemu/osdep.h"
#include "cpu.h"
+#include "exec/exec-all.h"
-int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
int exception, error_code;
- if (rw == 2) {
+ if (access_type == MMU_INST_FETCH) {
exception = POWERPC_EXCP_ISI;
error_code = 0x40000000;
} else {
exception = POWERPC_EXCP_DSI;
error_code = 0x40000000;
- if (rw) {
+ if (access_type == MMU_DATA_STORE) {
error_code |= 0x02000000;
}
env->spr[SPR_DAR] = address;
@@ -42,6 +45,5 @@ int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
}
cs->exception_index = exception;
env->error_code = error_code;
-
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
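
The ppc user-only conversion above makes the access-kind translation explicit: the old handlers received a bare integer rw (0 for load, 1 for store, 2 for instruction fetch), while the new hook receives an MMUAccessType. The correspondence assumed throughout these conversions is the enum from the core CPU headers, reproduced here for reference only:

typedef enum MMUAccessType {
    MMU_DATA_LOAD  = 0,   /* was rw == 0 */
    MMU_DATA_STORE = 1,   /* was rw == 1 */
    MMU_INST_FETCH = 2,   /* was rw == 2 */
} MMUAccessType;

This value equivalence is also why the RISC-V hunk below can keep the 1 << access_type construction for the PMP permission check where the old code used 1 << rw.
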
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 1bcf4ea..b767570 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -355,14 +355,13 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
#endif
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = riscv_cpu_disas_set_info;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = riscv_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
#endif
#ifdef CONFIG_TCG
cc->tcg_initialize = riscv_translate_init;
+ cc->tlb_fill = riscv_cpu_tlb_fill;
#endif
/* For now, mark unmigratable: */
cc->vmsd = &vmstate_riscv_cpu;
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 7d9f489..c17184f 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -261,8 +261,9 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type, int mmu_idx,
uintptr_t retaddr);
-int riscv_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
- int rw, int mmu_idx);
+bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(void);
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index b17f169..41d6db4 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -378,54 +378,44 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
env->badaddr = addr;
riscv_raise_exception(env, cs->exception_index, retaddr);
}
-
-/* called by qemu's softmmu to fill the qemu tlb */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
- ret = riscv_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (ret == TRANSLATE_FAIL) {
- RISCVCPU *cpu = RISCV_CPU(cs);
- CPURISCVState *env = &cpu->env;
- riscv_raise_exception(env, cs->exception_index, retaddr);
- }
-}
-
#endif
-int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
+#ifndef CONFIG_USER_ONLY
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
-#if !defined(CONFIG_USER_ONLY)
hwaddr pa = 0;
int prot;
-#endif
int ret = TRANSLATE_FAIL;
- qemu_log_mask(CPU_LOG_MMU,
- "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx \
- %d\n", __func__, env->pc, address, rw, mmu_idx);
+ qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
+ __func__, address, access_type, mmu_idx);
+
+ ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx);
-#if !defined(CONFIG_USER_ONLY)
- ret = get_physical_address(env, &pa, &prot, address, rw, mmu_idx);
qemu_log_mask(CPU_LOG_MMU,
- "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
- " prot %d\n", __func__, address, ret, pa, prot);
+ "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
+ " prot %d\n", __func__, address, ret, pa, prot);
+
if (riscv_feature(env, RISCV_FEATURE_PMP) &&
- !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << rw)) {
+ !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << access_type)) {
ret = TRANSLATE_FAIL;
}
if (ret == TRANSLATE_SUCCESS) {
tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
- } else if (ret == TRANSLATE_FAIL) {
- raise_mmu_exception(env, address, rw);
+ return true;
+ } else if (probe) {
+ return false;
+ } else {
+ raise_mmu_exception(env, address, access_type);
+ riscv_raise_exception(env, cs->exception_index, retaddr);
}
#else
- switch (rw) {
+ switch (access_type) {
case MMU_INST_FETCH:
cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
break;
@@ -436,8 +426,8 @@ int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
break;
}
+ cpu_loop_exit_restore(cs, retaddr);
#endif
- return ret;
}
/*
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index b58ef0a..b1df63d 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -478,9 +478,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = s390_cpu_set_pc;
cc->gdb_read_register = s390_cpu_gdb_read_register;
cc->gdb_write_register = s390_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = s390_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = s390_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_s390_cpu;
cc->write_elf64_note = s390_cpu_write_elf64_note;
@@ -493,6 +491,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->disas_set_info = s390_cpu_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = s390x_translate_init;
+ cc->tlb_fill = s390_cpu_tlb_fill;
#endif
cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c
index f84bfb1..3a467b7 100644
--- a/target/s390x/excp_helper.c
+++ b/target/s390x/excp_helper.c
@@ -74,8 +74,9 @@ void s390_cpu_do_interrupt(CPUState *cs)
cs->exception_index = -1;
}
-int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
S390CPU *cpu = S390_CPU(cs);
@@ -83,7 +84,7 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
/* On real machines this value is dropped into LowMem. Since this
is userland, simply put this someplace that cpu_loop can find it. */
cpu->env.__excp_addr = address;
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
@@ -102,19 +103,20 @@ static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
}
}
-int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
- int rw, int mmu_idx)
+bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
target_ulong vaddr, raddr;
uint64_t asc;
- int prot;
+ int prot, fail;
qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
- __func__, orig_vaddr, rw, mmu_idx);
+ __func__, address, access_type, mmu_idx);
- vaddr = orig_vaddr;
+ vaddr = address;
if (mmu_idx < MMU_REAL_IDX) {
asc = cpu_mmu_idx_to_asc(mmu_idx);
@@ -122,39 +124,58 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
if (!(env->psw.mask & PSW_MASK_64)) {
vaddr &= 0x7fffffff;
}
- if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
- return 1;
- }
+ fail = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, true);
} else if (mmu_idx == MMU_REAL_IDX) {
/* 31-Bit mode */
if (!(env->psw.mask & PSW_MASK_64)) {
vaddr &= 0x7fffffff;
}
- if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
- return 1;
- }
+ fail = mmu_translate_real(env, vaddr, access_type, &raddr, &prot);
} else {
- abort();
+ g_assert_not_reached();
}
/* check out of RAM access */
- if (!address_space_access_valid(&address_space_memory, raddr,
- TARGET_PAGE_SIZE, rw,
+ if (!fail &&
+ !address_space_access_valid(&address_space_memory, raddr,
+ TARGET_PAGE_SIZE, access_type,
MEMTXATTRS_UNSPECIFIED)) {
qemu_log_mask(CPU_LOG_MMU,
"%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
__func__, (uint64_t)raddr, (uint64_t)ram_size);
trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
- return 1;
+ fail = 1;
}
- qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
- __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
+ if (!fail) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
+ __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
+ tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ if (probe) {
+ return false;
+ }
- tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ cpu_restore_state(cs, retaddr, true);
+
+ /*
+ * The ILC value for code accesses is undefined. The important
+ * thing here is to *not* leave env->int_pgm_ilen set to ILEN_AUTO,
+ * which would cause do_program_interrupt to attempt to read from
+     * env->psw.addr again. Cf. the condition in trigger_page_fault,
+     * which is not universally applied.
+ *
+ * ??? If we remove ILEN_AUTO, by moving the computation of ILEN
+ * into cpu_restore_state, then we may remove this entirely.
+ */
+ if (access_type == MMU_INST_FETCH) {
+ env->int_pgm_ilen = 2;
+ }
- return 0;
+ cpu_loop_exit(cs);
}
static void do_program_interrupt(CPUS390XState *env)
diff --git a/target/s390x/internal.h b/target/s390x/internal.h
index 26575f2..56534b3 100644
--- a/target/s390x/internal.h
+++ b/target/s390x/internal.h
@@ -263,8 +263,9 @@ ObjectClass *s390_cpu_class_by_name(const char *name);
void s390x_cpu_debug_excp_handler(CPUState *cs);
void s390_cpu_do_interrupt(CPUState *cpu);
bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
-int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index 3f76a8a..ffd5f02 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -33,22 +33,6 @@
/*****************************************************************************/
/* Softmmu support */
-#if !defined(CONFIG_USER_ONLY)
-
-/* try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret = s390_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret != 0)) {
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-
-#endif
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index da27990..c4736a0 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -229,9 +229,8 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
cc->synchronize_from_tb = superh_cpu_synchronize_from_tb;
cc->gdb_read_register = superh_cpu_gdb_read_register;
cc->gdb_write_register = superh_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = superh_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = superh_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = superh_cpu_do_unaligned_access;
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
#endif
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 1be36fe..547194a 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -243,8 +243,9 @@ void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
void sh4_translate_init(void);
int cpu_sh4_signal_handler(int host_signum, void *pinfo,
void *puc);
-int superh_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void sh4_cpu_list(void);
#if !defined(CONFIG_USER_ONLY)
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index fa51269..fda195e 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -27,43 +27,6 @@
#include "hw/sh4/sh_intc.h"
#endif
-#if defined(CONFIG_USER_ONLY)
-
-void superh_cpu_do_interrupt(CPUState *cs)
-{
- cs->exception_index = -1;
-}
-
-int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
-{
- SuperHCPU *cpu = SUPERH_CPU(cs);
- CPUSH4State *env = &cpu->env;
-
- env->tea = address;
- cs->exception_index = -1;
- switch (rw) {
- case 0:
- cs->exception_index = 0x0a0;
- break;
- case 1:
- cs->exception_index = 0x0c0;
- break;
- case 2:
- cs->exception_index = 0x0a0;
- break;
- }
- return 1;
-}
-
-int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
-{
- /* For user mode, only U0 area is cacheable. */
- return !(addr & 0x80000000);
-}
-
-#else /* !CONFIG_USER_ONLY */
-
#define MMU_OK 0
#define MMU_ITLB_MISS (-1)
#define MMU_ITLB_MULTIPLE (-2)
@@ -79,6 +42,21 @@ int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
#define MMU_DADDR_ERROR_READ (-12)
#define MMU_DADDR_ERROR_WRITE (-13)
+#if defined(CONFIG_USER_ONLY)
+
+void superh_cpu_do_interrupt(CPUState *cs)
+{
+ cs->exception_index = -1;
+}
+
+int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
+{
+ /* For user mode, only U0 area is cacheable. */
+ return !(addr & 0x80000000);
+}
+
+#else /* !CONFIG_USER_ONLY */
+
void superh_cpu_do_interrupt(CPUState *cs)
{
SuperHCPU *cpu = SUPERH_CPU(cs);
@@ -458,69 +436,6 @@ static int get_physical_address(CPUSH4State * env, target_ulong * physical,
return get_mmu_address(env, physical, prot, address, rw, access_type);
}
-int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
-{
- SuperHCPU *cpu = SUPERH_CPU(cs);
- CPUSH4State *env = &cpu->env;
- target_ulong physical;
- int prot, ret, access_type;
-
- access_type = ACCESS_INT;
- ret =
- get_physical_address(env, &physical, &prot, address, rw,
- access_type);
-
- if (ret != MMU_OK) {
- env->tea = address;
- if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
- env->pteh = (env->pteh & PTEH_ASID_MASK) |
- (address & PTEH_VPN_MASK);
- }
- switch (ret) {
- case MMU_ITLB_MISS:
- case MMU_DTLB_MISS_READ:
- cs->exception_index = 0x040;
- break;
- case MMU_DTLB_MULTIPLE:
- case MMU_ITLB_MULTIPLE:
- cs->exception_index = 0x140;
- break;
- case MMU_ITLB_VIOLATION:
- cs->exception_index = 0x0a0;
- break;
- case MMU_DTLB_MISS_WRITE:
- cs->exception_index = 0x060;
- break;
- case MMU_DTLB_INITIAL_WRITE:
- cs->exception_index = 0x080;
- break;
- case MMU_DTLB_VIOLATION_READ:
- cs->exception_index = 0x0a0;
- break;
- case MMU_DTLB_VIOLATION_WRITE:
- cs->exception_index = 0x0c0;
- break;
- case MMU_IADDR_ERROR:
- case MMU_DADDR_ERROR_READ:
- cs->exception_index = 0x0e0;
- break;
- case MMU_DADDR_ERROR_WRITE:
- cs->exception_index = 0x100;
- break;
- default:
- cpu_abort(cs, "Unhandled MMU fault");
- }
- return 1;
- }
-
- address &= TARGET_PAGE_MASK;
- physical &= TARGET_PAGE_MASK;
-
- tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
- return 0;
-}
-
hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
SuperHCPU *cpu = SUPERH_CPU(cs);
@@ -745,7 +660,6 @@ void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
if (needs_tlb_flush) {
tlb_flush_page(CPU(sh_env_get_cpu(s)), vpn << 10);
}
-
} else {
int index = (addr & 0x00003f00) >> 8;
tlb_t * entry = &s->utlb[index];
@@ -885,3 +799,76 @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
+
+bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+ int ret;
+
+#ifdef CONFIG_USER_ONLY
+ ret = (access_type == MMU_DATA_STORE ? MMU_DTLB_VIOLATION_WRITE :
+ access_type == MMU_INST_FETCH ? MMU_ITLB_VIOLATION :
+ MMU_DTLB_VIOLATION_READ);
+#else
+ target_ulong physical;
+ int prot, sh_access_type;
+
+ sh_access_type = ACCESS_INT;
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, sh_access_type);
+
+ if (ret == MMU_OK) {
+ address &= TARGET_PAGE_MASK;
+ physical &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ if (probe) {
+ return false;
+ }
+
+ if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
+ env->pteh = (env->pteh & PTEH_ASID_MASK) | (address & PTEH_VPN_MASK);
+ }
+#endif
+
+ env->tea = address;
+ switch (ret) {
+ case MMU_ITLB_MISS:
+ case MMU_DTLB_MISS_READ:
+ cs->exception_index = 0x040;
+ break;
+ case MMU_DTLB_MULTIPLE:
+ case MMU_ITLB_MULTIPLE:
+ cs->exception_index = 0x140;
+ break;
+ case MMU_ITLB_VIOLATION:
+ cs->exception_index = 0x0a0;
+ break;
+ case MMU_DTLB_MISS_WRITE:
+ cs->exception_index = 0x060;
+ break;
+ case MMU_DTLB_INITIAL_WRITE:
+ cs->exception_index = 0x080;
+ break;
+ case MMU_DTLB_VIOLATION_READ:
+ cs->exception_index = 0x0a0;
+ break;
+ case MMU_DTLB_VIOLATION_WRITE:
+ cs->exception_index = 0x0c0;
+ break;
+ case MMU_IADDR_ERROR:
+ case MMU_DADDR_ERROR_READ:
+ cs->exception_index = 0x0e0;
+ break;
+ case MMU_DADDR_ERROR_WRITE:
+ cs->exception_index = 0x100;
+ break;
+ default:
+ cpu_abort(cs, "Unhandled MMU fault");
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+}
diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c
index 28027f9..bd5d782 100644
--- a/target/sh4/op_helper.c
+++ b/target/sh4/op_helper.c
@@ -41,18 +41,6 @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
cpu_loop_exit_restore(cs, retaddr);
}
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = superh_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (ret) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-
#endif
void helper_ldtlb(CPUSH4State *env)
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index 4654c2a..f93ce72 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -875,9 +875,8 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb;
cc->gdb_read_register = sparc_cpu_gdb_read_register;
cc->gdb_write_register = sparc_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = sparc_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = sparc_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_unassigned_access = sparc_cpu_unassigned_access;
cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index 85b9665..f31e853 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -579,8 +579,9 @@ void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t) QEMU_NORETURN;
void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
void sparc_cpu_list(void);
/* mmu_helper.c */
-int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
void dump_mmu(CPUSPARCState *env);
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index a7fcb84..b4bf6fa 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -1924,19 +1924,4 @@ void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
#endif
cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
}
-
-/* try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = sparc_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (ret) {
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
#endif
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index afcc5b6..facc0c6 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -27,13 +27,14 @@
#if defined(CONFIG_USER_ONLY)
-int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
- if (rw & 2) {
+ if (access_type == MMU_INST_FETCH) {
cs->exception_index = TT_TFAULT;
} else {
cs->exception_index = TT_DFAULT;
@@ -43,7 +44,7 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
env->mmuregs[4] = address;
#endif
}
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else
@@ -208,8 +209,9 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
}
/* Perform address translation */
-int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
@@ -218,16 +220,26 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
target_ulong page_size;
int error_code = 0, prot, access_index;
+ /*
+ * TODO: If we ever need tlb_vaddr_to_host for this target,
+ * then we must figure out how to manipulate FSR and FAR
+ * when both MMU_NF and probe are set. In the meantime,
+ * do not support this use case.
+ */
+ assert(!probe);
+
address &= TARGET_PAGE_MASK;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
- address, rw, mmu_idx, &page_size);
+ address, access_type,
+ mmu_idx, &page_size);
vaddr = address;
- if (error_code == 0) {
+ if (likely(error_code == 0)) {
qemu_log_mask(CPU_LOG_MMU,
- "Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr "
- TARGET_FMT_lx "\n", address, paddr, vaddr);
+ "Translate at %" VADDR_PRIx " -> "
+ TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
+ address, paddr, vaddr);
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
- return 0;
+ return true;
}
if (env->mmuregs[3]) { /* Fault status register */
@@ -243,14 +255,14 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
switching to normal mode. */
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
- return 0;
+ return true;
} else {
- if (rw & 2) {
+ if (access_type == MMU_INST_FETCH) {
cs->exception_index = TT_TFAULT;
} else {
cs->exception_index = TT_DFAULT;
}
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
}
@@ -713,8 +725,9 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
}
/* Perform address translation */
-int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
@@ -725,8 +738,9 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
address &= TARGET_PAGE_MASK;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
- address, rw, mmu_idx, &page_size);
- if (error_code == 0) {
+ address, access_type,
+ mmu_idx, &page_size);
+ if (likely(error_code == 0)) {
vaddr = address;
trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
@@ -734,10 +748,12 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
env->dmmu.mmu_secondary_context);
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
- return 0;
+ return true;
}
- /* XXX */
- return 1;
+ if (probe) {
+ return false;
+ }
+ cpu_loop_exit_restore(cs, retaddr);
}
void dump_mmu(CPUSPARCState *env)
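
The assert(!probe) above leans on the other half of the new contract: a probing caller passes probe=true and must accept a false return in place of a guest exception. A hypothetical probing helper, purely to illustrate the calling convention (not code from this series, and per the comment above not something the sparc target supports yet):

/*
 * Return true if ADDR is currently translatable for ACCESS_TYPE in
 * MMU_IDX, without raising a guest exception on failure.
 */
static bool guest_addr_accessible(CPUState *cs, vaddr addr,
                                  MMUAccessType access_type, int mmu_idx)
{
    CPUClass *cc = CPU_GET_CLASS(cs);

    /* size and retaddr are unused by the converted targets on the
     * probe path, since no exception is raised and nothing is retried. */
    return cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0);
}
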
diff --git a/target/tilegx/cpu.c b/target/tilegx/cpu.c
index b9d3710..b209c55 100644
--- a/target/tilegx/cpu.c
+++ b/target/tilegx/cpu.c
@@ -25,6 +25,7 @@
#include "hw/qdev-properties.h"
#include "linux-user/syscall_defs.h"
#include "qemu/qemu-print.h"
+#include "exec/exec-all.h"
static void tilegx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
@@ -111,8 +112,9 @@ static void tilegx_cpu_do_interrupt(CPUState *cs)
cs->exception_index = -1;
}
-static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
+static bool tilegx_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
TileGXCPU *cpu = TILEGX_CPU(cs);
@@ -122,7 +124,7 @@ static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
cpu->env.signo = TARGET_SIGSEGV;
cpu->env.sigcode = 0;
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
static bool tilegx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
@@ -152,7 +154,7 @@ static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
cc->cpu_exec_interrupt = tilegx_cpu_exec_interrupt;
cc->dump_state = tilegx_cpu_dump_state;
cc->set_pc = tilegx_cpu_set_pc;
- cc->handle_mmu_fault = tilegx_cpu_handle_mmu_fault;
+ cc->tlb_fill = tilegx_cpu_tlb_fill;
cc->gdb_num_core_regs = 0;
cc->tcg_initialize = tilegx_tcg_init;
}
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index e8d37e4..ea1199d 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -166,6 +166,7 @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb;
cc->get_phys_page_attrs_debug = tricore_cpu_get_phys_page_attrs_debug;
cc->tcg_initialize = tricore_tcg_init;
+ cc->tlb_fill = tricore_cpu_tlb_fill;
}
#define DEFINE_TRICORE_CPU_TYPE(cpu_model, initfn) \
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index 64d1a9c..287f432 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -417,8 +417,8 @@ static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc,
#define CPU_RESOLVING_TYPE TYPE_TRICORE_CPU
/* helpers.c */
-int cpu_tricore_handle_mmu_fault(CPUState *cpu, target_ulong address,
- int rw, int mmu_idx);
-#define cpu_handle_mmu_fault cpu_tricore_handle_mmu_fault
+bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
#endif /* TRICORE_CPU_H */
diff --git a/target/tricore/helper.c b/target/tricore/helper.c
index 78ee87c..a680336 100644
--- a/target/tricore/helper.c
+++ b/target/tricore/helper.c
@@ -50,8 +50,9 @@ static void raise_mmu_exception(CPUTriCoreState *env, target_ulong address,
{
}
-int cpu_tricore_handle_mmu_fault(CPUState *cs, target_ulong address,
- int rw, int mmu_idx)
+bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType rw, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
TriCoreCPU *cpu = TRICORE_CPU(cs);
CPUTriCoreState *env = &cpu->env;
@@ -64,20 +65,24 @@ int cpu_tricore_handle_mmu_fault(CPUState *cs, target_ulong address,
access_type = ACCESS_INT;
ret = get_physical_address(env, &physical, &prot,
address, rw, access_type);
- qemu_log_mask(CPU_LOG_MMU, "%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx
- " prot %d\n", __func__, address, ret, physical, prot);
+
+ qemu_log_mask(CPU_LOG_MMU, "%s address=" TARGET_FMT_lx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, (target_ulong)address, ret, physical, prot);
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
- ret = 0;
- } else if (ret < 0) {
+ return true;
+ } else {
+ assert(ret < 0);
+ if (probe) {
+ return false;
+ }
raise_mmu_exception(env, address, rw, ret);
- ret = 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
-
- return ret;
}
static void tricore_cpu_list_entry(gpointer data, gpointer user_data)
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
index ed9dc0c..601e92f 100644
--- a/target/tricore/op_helper.c
+++ b/target/tricore/op_helper.c
@@ -2793,29 +2793,3 @@ uint32_t helper_psw_read(CPUTriCoreState *env)
{
return psw_read(env);
}
-
-
-static inline void QEMU_NORETURN do_raise_exception_err(CPUTriCoreState *env,
- uint32_t exception,
- int error_code,
- uintptr_t pc)
-{
- CPUState *cs = CPU(tricore_env_get_cpu(env));
- cs->exception_index = exception;
- env->error_code = error_code;
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, pc);
-}
-
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
- ret = cpu_tricore_handle_mmu_fault(cs, addr, access_type, mmu_idx);
- if (ret) {
- TriCoreCPU *cpu = TRICORE_CPU(cs);
- CPUTriCoreState *env = &cpu->env;
- do_raise_exception_err(env, cs->exception_index,
- env->error_code, retaddr);
- }
-}
diff --git a/target/unicore32/cpu.c b/target/unicore32/cpu.c
index 2b49d1c..3f57c50 100644
--- a/target/unicore32/cpu.c
+++ b/target/unicore32/cpu.c
@@ -138,11 +138,8 @@ static void uc32_cpu_class_init(ObjectClass *oc, void *data)
cc->cpu_exec_interrupt = uc32_cpu_exec_interrupt;
cc->dump_state = uc32_cpu_dump_state;
cc->set_pc = uc32_cpu_set_pc;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = uc32_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = uc32_cpu_tlb_fill;
cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug;
-#endif
cc->tcg_initialize = uc32_translate_init;
dc->vmsd = &vmstate_uc32_cpu;
}
diff --git a/target/unicore32/cpu.h b/target/unicore32/cpu.h
index 24abe5e..f052ee0 100644
--- a/target/unicore32/cpu.h
+++ b/target/unicore32/cpu.h
@@ -178,8 +178,9 @@ static inline void cpu_get_tb_cpu_state(CPUUniCore32State *env, target_ulong *pc
}
}
-int uc32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_idx);
+bool uc32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void uc32_translate_init(void);
void switch_mode(CPUUniCore32State *, int);
diff --git a/target/unicore32/helper.c b/target/unicore32/helper.c
index a5ff2dd..0d4914b 100644
--- a/target/unicore32/helper.c
+++ b/target/unicore32/helper.c
@@ -215,29 +215,6 @@ void helper_cp1_putc(target_ulong x)
}
#endif
-#ifdef CONFIG_USER_ONLY
-void switch_mode(CPUUniCore32State *env, int mode)
-{
- UniCore32CPU *cpu = uc32_env_get_cpu(env);
-
- if (mode != ASR_MODE_USER) {
- cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
- }
-}
-
-void uc32_cpu_do_interrupt(CPUState *cs)
-{
- cpu_abort(cs, "NO interrupt in user mode\n");
-}
-
-int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int access_type, int mmu_idx)
-{
- cpu_abort(cs, "NO mmu fault in user mode\n");
- return 1;
-}
-#endif
-
bool uc32_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
diff --git a/target/unicore32/op_helper.c b/target/unicore32/op_helper.c
index e0a1588..797ba60 100644
--- a/target/unicore32/op_helper.c
+++ b/target/unicore32/op_helper.c
@@ -242,17 +242,3 @@ uint32_t HELPER(ror_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
return ((uint32_t)x >> shift) | (x << (32 - shift));
}
}
-
-#ifndef CONFIG_USER_ONLY
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- int ret;
-
- ret = uc32_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
- if (unlikely(ret)) {
- /* now we have a real cpu fault */
- cpu_loop_exit_restore(cs, retaddr);
- }
-}
-#endif
diff --git a/target/unicore32/softmmu.c b/target/unicore32/softmmu.c
index 00c7e0d..27f218a 100644
--- a/target/unicore32/softmmu.c
+++ b/target/unicore32/softmmu.c
@@ -215,8 +215,9 @@ do_fault:
return code;
}
-int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int access_type, int mmu_idx)
+bool uc32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
UniCore32CPU *cpu = UNICORE32_CPU(cs);
CPUUniCore32State *env = &cpu->env;
@@ -257,7 +258,11 @@ int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
phys_addr &= TARGET_PAGE_MASK;
address &= TARGET_PAGE_MASK;
tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
- return 0;
+ return true;
+ }
+
+ if (probe) {
+ return false;
}
env->cp0.c3_faultstatus = ret;
@@ -267,7 +272,7 @@ int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
} else {
cs->exception_index = UC32_EXCP_DTRAP;
}
- return ret;
+ cpu_loop_exit_restore(cs, retaddr);
}
hwaddr uc32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index a54dbe4..da12363 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -181,9 +181,8 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_read_register = xtensa_cpu_gdb_read_register;
cc->gdb_write_register = xtensa_cpu_gdb_write_register;
cc->gdb_stop_before_watchpoint = true;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = xtensa_cpu_handle_mmu_fault;
-#else
+ cc->tlb_fill = xtensa_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
cc->do_transaction_failed = xtensa_cpu_do_transaction_failed;
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index 5d23e134..68d89f8 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -552,8 +552,9 @@ static inline XtensaCPU *xtensa_env_get_cpu(const CPUXtensaState *env)
#define ENV_OFFSET offsetof(XtensaCPU, env)
-int xtensa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, int size,
- int mmu_idx);
+bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
void xtensa_cpu_do_interrupt(CPUState *cpu);
bool xtensa_cpu_exec_interrupt(CPUState *cpu, int interrupt_request);
void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
index 5f37f37..efb966b 100644
--- a/target/xtensa/helper.c
+++ b/target/xtensa/helper.c
@@ -240,19 +240,21 @@ void xtensa_cpu_list(void)
#ifdef CONFIG_USER_ONLY
-int xtensa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
- int mmu_idx)
+bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
XtensaCPU *cpu = XTENSA_CPU(cs);
CPUXtensaState *env = &cpu->env;
qemu_log_mask(CPU_LOG_INT,
"%s: rw = %d, address = 0x%08" VADDR_PRIx ", size = %d\n",
- __func__, rw, address, size);
+ __func__, access_type, address, size);
env->sregs[EXCVADDR] = address;
- env->sregs[EXCCAUSE] = rw ? STORE_PROHIBITED_CAUSE : LOAD_PROHIBITED_CAUSE;
+ env->sregs[EXCCAUSE] = (access_type == MMU_DATA_STORE ?
+ STORE_PROHIBITED_CAUSE : LOAD_PROHIBITED_CAUSE);
cs->exception_index = EXC_USER;
- return 1;
+ cpu_loop_exit_restore(cs, retaddr);
}
#else
@@ -273,28 +275,33 @@ void xtensa_cpu_do_unaligned_access(CPUState *cs,
}
}
-void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
XtensaCPU *cpu = XTENSA_CPU(cs);
CPUXtensaState *env = &cpu->env;
uint32_t paddr;
uint32_t page_size;
unsigned access;
- int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
- &paddr, &page_size, &access);
+ int ret = xtensa_get_physical_addr(env, true, address, access_type,
+ mmu_idx, &paddr, &page_size, &access);
- qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
- __func__, vaddr, access_type, mmu_idx, paddr, ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s(%08" VADDR_PRIx
+ ", %d, %d) -> %08x, ret = %d\n",
+ __func__, address, access_type, mmu_idx, paddr, ret);
if (ret == 0) {
tlb_set_page(cs,
- vaddr & TARGET_PAGE_MASK,
+ address & TARGET_PAGE_MASK,
paddr & TARGET_PAGE_MASK,
access, mmu_idx, page_size);
+ return true;
+ } else if (probe) {
+ return false;
} else {
cpu_restore_state(cs, retaddr, true);
- HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
+ HELPER(exception_cause_vaddr)(env, env->pc, ret, address);
}
}
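
As the class_init hunks throughout this diff show, the new hook is installed either unconditionally or under CONFIG_TCG, rather than only for user-mode builds, because one function now serves both configurations. In sketch form, for a hypothetical target (foo_* names are illustrative):

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    /* One entry point for both user-only and softmmu builds. */
    cc->tlb_fill = foo_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
    cc->do_unaligned_access = foo_cpu_do_unaligned_access;
    cc->get_phys_page_debug = foo_cpu_get_phys_page_debug;
#endif
}
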