-rw-r--r--  exec.c  99
1 file changed, 23 insertions(+), 76 deletions(-)
diff --git a/exec.c b/exec.c
index bbaf0b6..92ee4e2 100644
--- a/exec.c
+++ b/exec.c
@@ -1762,28 +1762,12 @@ void tlb_flush(CPUState *env, int flush_global)
env->current_tb = NULL;
for(i = 0; i < CPU_TLB_SIZE; i++) {
- env->tlb_table[0][i].addr_read = -1;
- env->tlb_table[0][i].addr_write = -1;
- env->tlb_table[0][i].addr_code = -1;
- env->tlb_table[1][i].addr_read = -1;
- env->tlb_table[1][i].addr_write = -1;
- env->tlb_table[1][i].addr_code = -1;
-#if (NB_MMU_MODES >= 3)
- env->tlb_table[2][i].addr_read = -1;
- env->tlb_table[2][i].addr_write = -1;
- env->tlb_table[2][i].addr_code = -1;
-#endif
-#if (NB_MMU_MODES >= 4)
- env->tlb_table[3][i].addr_read = -1;
- env->tlb_table[3][i].addr_write = -1;
- env->tlb_table[3][i].addr_code = -1;
-#endif
-#if (NB_MMU_MODES >= 5)
- env->tlb_table[4][i].addr_read = -1;
- env->tlb_table[4][i].addr_write = -1;
- env->tlb_table[4][i].addr_code = -1;
-#endif
-
+ int mmu_idx;
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ env->tlb_table[mmu_idx][i].addr_read = -1;
+ env->tlb_table[mmu_idx][i].addr_write = -1;
+ env->tlb_table[mmu_idx][i].addr_code = -1;
+ }
}
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
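
For reference, a minimal standalone sketch of the loop shape the hunk above introduces. The NB_MMU_MODES value, CPU_TLB_SIZE, target_ulong width, and struct layout below are simplified placeholders, not QEMU's real definitions:

    #include <assert.h>
    #include <stdint.h>

    #define NB_MMU_MODES 3          /* placeholder; each QEMU target defines its own value */
    #define CPU_TLB_SIZE 256        /* placeholder table size */

    typedef uint32_t target_ulong;  /* simplified; the real width is target-dependent */

    typedef struct CPUTLBEntry {
        target_ulong addr_read;
        target_ulong addr_write;
        target_ulong addr_code;
    } CPUTLBEntry;

    static CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];

    /* Same shape as the patched tlb_flush(): one inner loop over MMU modes
       replaces the per-mode copies guarded by #if (NB_MMU_MODES >= n). */
    static void flush_all(void)
    {
        int i, mmu_idx;
        for (i = 0; i < CPU_TLB_SIZE; i++) {
            for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
                tlb_table[mmu_idx][i].addr_read = -1;
                tlb_table[mmu_idx][i].addr_write = -1;
                tlb_table[mmu_idx][i].addr_code = -1;
            }
        }
    }

    int main(void)
    {
        flush_all();
        /* every entry in every MMU mode is now invalidated */
        for (int m = 0; m < NB_MMU_MODES; m++)
            for (int i = 0; i < CPU_TLB_SIZE; i++)
                assert(tlb_table[m][i].addr_read == (target_ulong)-1);
        return 0;
    }

Because the loop bound is NB_MMU_MODES itself, the same code covers targets with any number of MMU modes, rather than only the two to five cases the old #if chain spelled out.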
@@ -1813,6 +1797,7 @@ static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
void tlb_flush_page(CPUState *env, target_ulong addr)
{
int i;
+ int mmu_idx;
#if defined(DEBUG_TLB)
printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
@@ -1823,17 +1808,8 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
addr &= TARGET_PAGE_MASK;
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_flush_entry(&env->tlb_table[0][i], addr);
- tlb_flush_entry(&env->tlb_table[1][i], addr);
-#if (NB_MMU_MODES >= 3)
- tlb_flush_entry(&env->tlb_table[2][i], addr);
-#endif
-#if (NB_MMU_MODES >= 4)
- tlb_flush_entry(&env->tlb_table[3][i], addr);
-#endif
-#if (NB_MMU_MODES >= 5)
- tlb_flush_entry(&env->tlb_table[4][i], addr);
-#endif
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
+ tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
tlb_flush_jmp_cache(env, addr);
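
The per-page variant in the hunk above follows the same pattern: derive one table index from the page address, then loop over the MMU modes. Below is a hedged sketch using the same simplified placeholders as the previous example; tlb_flush_entry's body is not part of this diff, so the comparison in flush_entry() is only an assumption made to keep the sketch self-contained:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t target_ulong;     /* simplified; the real width is target-dependent */

    #define NB_MMU_MODES 3             /* placeholder value */
    #define CPU_TLB_SIZE 256           /* placeholder; a power of two, as the mask below assumes */
    #define TARGET_PAGE_BITS 12        /* placeholder 4 KiB pages */
    #define TARGET_PAGE_MASK ((target_ulong)~((1u << TARGET_PAGE_BITS) - 1))

    typedef struct CPUTLBEntry {
        target_ulong addr_read;
        target_ulong addr_write;
        target_ulong addr_code;
    } CPUTLBEntry;

    static CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];

    /* Assumed semantics for tlb_flush_entry(): invalidate the entry only if
       it maps the page being flushed (the real body is not shown in the diff). */
    static void flush_entry(CPUTLBEntry *e, target_ulong addr)
    {
        if ((e->addr_read  & TARGET_PAGE_MASK) == addr ||
            (e->addr_write & TARGET_PAGE_MASK) == addr ||
            (e->addr_code  & TARGET_PAGE_MASK) == addr) {
            e->addr_read = e->addr_write = e->addr_code = -1;
        }
    }

    /* Mirrors the patched tlb_flush_page(): one index is derived from the
       page address, then that slot is flushed in every MMU mode's table. */
    static void flush_page(target_ulong addr)
    {
        int i, mmu_idx;

        addr &= TARGET_PAGE_MASK;
        i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
            flush_entry(&tlb_table[mmu_idx][i], addr);
    }

    int main(void)
    {
        target_ulong page = 0x4000;
        int slot = (page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

        /* pretend MMU mode 1 currently maps this page, then flush it */
        tlb_table[1][slot].addr_read = page;
        flush_page(page);
        assert(tlb_table[1][slot].addr_read == (target_ulong)-1);
        return 0;
    }

The remaining hunks (dirty-range reset, dirty update, and tlb_set_dirty) apply the identical transformation: the manually unrolled per-mode calls collapse into one mmu_idx loop bounded by NB_MMU_MODES.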
@@ -1917,22 +1893,12 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
}
for(env = first_cpu; env != NULL; env = env->next_cpu) {
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
-#if (NB_MMU_MODES >= 3)
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
-#endif
-#if (NB_MMU_MODES >= 4)
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
-#endif
-#if (NB_MMU_MODES >= 5)
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
-#endif
+ int mmu_idx;
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ for(i = 0; i < CPU_TLB_SIZE; i++)
+ tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
+ start1, length);
+ }
}
}
@@ -1979,22 +1945,11 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
void cpu_tlb_update_dirty(CPUState *env)
{
int i;
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_update_dirty(&env->tlb_table[0][i]);
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_update_dirty(&env->tlb_table[1][i]);
-#if (NB_MMU_MODES >= 3)
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_update_dirty(&env->tlb_table[2][i]);
-#endif
-#if (NB_MMU_MODES >= 4)
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_update_dirty(&env->tlb_table[3][i]);
-#endif
-#if (NB_MMU_MODES >= 5)
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_update_dirty(&env->tlb_table[4][i]);
-#endif
+ int mmu_idx;
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ for(i = 0; i < CPU_TLB_SIZE; i++)
+ tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
+ }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
@@ -2008,20 +1963,12 @@ static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
int i;
+ int mmu_idx;
vaddr &= TARGET_PAGE_MASK;
i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
- tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
-#if (NB_MMU_MODES >= 3)
- tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
-#endif
-#if (NB_MMU_MODES >= 4)
- tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
-#endif
-#if (NB_MMU_MODES >= 5)
- tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
-#endif
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
+ tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* add a new TLB entry. At most one entry for a given virtual address