author     aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162>    2009-04-07 21:47:27 +0000
committer  aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162>    2009-04-07 21:47:27 +0000
commit     e37e6ee6e100ebc355b4a48ae9a7802b38b8dac0 (patch)
tree       f986baa43378643c65cd3e94ebd25452cc41a1be /exec.c
parent     8fcc55f9de36354a8dcae1b47cadf45e8bba5cca (diff)
Allow 5 mmu indexes.
This is necessary for Alpha because it has 4 protection levels and PAL mode.
Signed-off-by: Tristan Gingold <gingold@adacore.com>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@7028 c046a42c-6fe2-441c-8c8c-71466251a162
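
For context, the MMU index count is a per-target build-time constant: each target defines NB_MMU_MODES in its cpu.h and maps the current CPU state onto one of those indexes through its cpu_mmu_index() helper. The sketch below shows roughly how a five-mode target such as Alpha could wire this up; it is illustrative only, and the field names (pal_mode, ps) are assumptions, not the actual target-alpha code.

    /* Illustrative sketch -- not the real target-alpha definitions. */
    #define NB_MMU_MODES 5

    /* Hypothetical mapping: indexes 0-3 for the four protection levels,
       index 4 while the CPU is executing in PAL mode. */
    static inline int cpu_mmu_index(CPUState *env)
    {
        if (env->pal_mode) {      /* assumed flag for PAL mode */
            return 4;
        }
        return env->ps & 3;       /* assumed: privilege level taken from PS */
    }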
Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c  30
1 file changed, 25 insertions, 5 deletions
@@ -1734,12 +1734,18 @@ void tlb_flush(CPUState *env, int flush_global)
         env->tlb_table[2][i].addr_read = -1;
         env->tlb_table[2][i].addr_write = -1;
         env->tlb_table[2][i].addr_code = -1;
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
         env->tlb_table[3][i].addr_read = -1;
         env->tlb_table[3][i].addr_write = -1;
         env->tlb_table[3][i].addr_code = -1;
 #endif
+#if (NB_MMU_MODES >= 5)
+        env->tlb_table[4][i].addr_read = -1;
+        env->tlb_table[4][i].addr_write = -1;
+        env->tlb_table[4][i].addr_code = -1;
 #endif
+
     }
 
     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
@@ -1783,9 +1789,12 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
     tlb_flush_entry(&env->tlb_table[1][i], addr);
 #if (NB_MMU_MODES >= 3)
     tlb_flush_entry(&env->tlb_table[2][i], addr);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
     tlb_flush_entry(&env->tlb_table[3][i], addr);
 #endif
+#if (NB_MMU_MODES >= 5)
+    tlb_flush_entry(&env->tlb_table[4][i], addr);
 #endif
 
     tlb_flush_jmp_cache(env, addr);
@@ -1869,10 +1878,14 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
 #if (NB_MMU_MODES >= 3)
         for(i = 0; i < CPU_TLB_SIZE; i++)
             tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
         for(i = 0; i < CPU_TLB_SIZE; i++)
             tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
 #endif
+#if (NB_MMU_MODES >= 5)
+        for(i = 0; i < CPU_TLB_SIZE; i++)
+            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
 #endif
     }
 }
@@ -1918,10 +1931,14 @@ void cpu_tlb_update_dirty(CPUState *env)
 #if (NB_MMU_MODES >= 3)
     for(i = 0; i < CPU_TLB_SIZE; i++)
         tlb_update_dirty(&env->tlb_table[2][i]);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
     for(i = 0; i < CPU_TLB_SIZE; i++)
         tlb_update_dirty(&env->tlb_table[3][i]);
 #endif
+#if (NB_MMU_MODES >= 5)
+    for(i = 0; i < CPU_TLB_SIZE; i++)
+        tlb_update_dirty(&env->tlb_table[4][i]);
 #endif
 }
 
@@ -1943,9 +1960,12 @@ static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
     tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
 #if (NB_MMU_MODES >= 3)
     tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
     tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
 #endif
+#if (NB_MMU_MODES >= 5)
+    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
 #endif
 }
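
The pattern in every hunk is the same: the old nested #if (NB_MMU_MODES >= 3) / #if (NB_MMU_MODES == 4) blocks are split into independent #if (NB_MMU_MODES >= N) blocks, one per extra MMU mode, so that a fifth tlb_table can be flushed and dirty-tracked like the others. Purely as an illustration (not part of this commit), the same work could also be expressed as a runtime loop over the mmu indexes, assuming the tlb_table[NB_MMU_MODES][CPU_TLB_SIZE] layout the diff implies:

    /* Sketch only: loop-based equivalent of the unrolled tlb_flush blocks above. */
    static void tlb_flush_all_modes(CPUState *env)
    {
        int mmu_idx, i;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for (i = 0; i < CPU_TLB_SIZE; i++) {
                env->tlb_table[mmu_idx][i].addr_read = -1;
                env->tlb_table[mmu_idx][i].addr_write = -1;
                env->tlb_table[mmu_idx][i].addr_code = -1;
            }
        }
    }

The commit keeps the unrolled, preprocessor-guarded form, presumably so that targets with fewer MMU modes compile none of the extra per-mode code.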