From 0f590e749f7c838bfd40b79242fc5aeb91e81747 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 17:55:24 +0100 Subject: softmmu: commonize helper definitions They do not need to be in op_helper.c. Because cputlb.c now includes softmmu_template.h twice for each size, io_readX must be elided the second time through. Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target-i386/mem_helper.c | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'target-i386') diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c index 83aa103..16bc91b 100644 --- a/target-i386/mem_helper.c +++ b/target-i386/mem_helper.c @@ -110,24 +110,6 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v) } #if !defined(CONFIG_USER_ONLY) - -#define MMUSUFFIX _mmu - -#define SHIFT 0 -#include "exec/softmmu_template.h" - -#define SHIFT 1 -#include "exec/softmmu_template.h" - -#define SHIFT 2 -#include "exec/softmmu_template.h" - -#define SHIFT 3 -#include "exec/softmmu_template.h" - -#endif - -#if !defined(CONFIG_USER_ONLY) /* try to fill the TLB and return an exception if error. If retaddr is * NULL, it means that the function was called in C code (i.e. not * from generated code or from helper.c) -- cgit v1.1 From f08b617018e424134a0a012b08253d567c62f7ee Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 19:42:10 +0100 Subject: softmmu: introduce cpu_ldst.h This will collect all load and store helpers soon. For now it is just a replacement for softmmu_exec.h, which this patch stops including directly, but we also include it where this will be necessary in order to simplify the next patch. Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target-i386/fpu_helper.c | 5 +---- target-i386/mem_helper.c | 5 +---- target-i386/misc_helper.c | 5 +---- target-i386/seg_helper.c | 5 +---- target-i386/svm_helper.c | 5 +---- target-i386/translate.c | 1 + 6 files changed, 6 insertions(+), 20 deletions(-) (limited to 'target-i386') diff --git a/target-i386/fpu_helper.c b/target-i386/fpu_helper.c index a04e754..1b2900d 100644 --- a/target-i386/fpu_helper.c +++ b/target-i386/fpu_helper.c @@ -22,10 +22,7 @@ #include "exec/helper-proto.h" #include "qemu/aes.h" #include "qemu/host-utils.h" - -#if !defined(CONFIG_USER_ONLY) -#include "exec/softmmu_exec.h" -#endif /* !defined(CONFIG_USER_ONLY) */ +#include "exec/cpu_ldst.h" #define FPU_RC_MASK 0xc00 #define FPU_RC_NEAR 0x000 diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c index 16bc91b..1aec8a5 100644 --- a/target-i386/mem_helper.c +++ b/target-i386/mem_helper.c @@ -19,10 +19,7 @@ #include "cpu.h" #include "exec/helper-proto.h" - -#if !defined(CONFIG_USER_ONLY) -#include "exec/softmmu_exec.h" -#endif /* !defined(CONFIG_USER_ONLY) */ +#include "exec/cpu_ldst.h" /* broken thread support */ diff --git a/target-i386/misc_helper.c b/target-i386/misc_helper.c index 9cfa25f..4f1e30f 100644 --- a/target-i386/misc_helper.c +++ b/target-i386/misc_helper.c @@ -20,10 +20,7 @@ #include "cpu.h" #include "exec/ioport.h" #include "exec/helper-proto.h" - -#if !defined(CONFIG_USER_ONLY) -#include "exec/softmmu_exec.h" -#endif /* !defined(CONFIG_USER_ONLY) */ +#include "exec/cpu_ldst.h" /* check if Port I/O is allowed in TSS */ static inline void check_io(CPUX86State *env, int addr, int size) diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c index 258aae8..ba47215 100644 --- a/target-i386/seg_helper.c +++ b/target-i386/seg_helper.c @@ -21,13 +21,10 @@ #include "cpu.h" #include "qemu/log.h" 
#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" //#define DEBUG_PCALL -#if !defined(CONFIG_USER_ONLY) -#include "exec/softmmu_exec.h" -#endif /* !defined(CONFIG_USER_ONLY) */ - #ifdef DEBUG_PCALL # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__) # define LOG_PCALL_STATE(cpu) \ diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c index 852e2ba..d250d18 100644 --- a/target-i386/svm_helper.c +++ b/target-i386/svm_helper.c @@ -20,10 +20,7 @@ #include "cpu.h" #include "exec/cpu-all.h" #include "exec/helper-proto.h" - -#if !defined(CONFIG_USER_ONLY) -#include "exec/softmmu_exec.h" -#endif /* !defined(CONFIG_USER_ONLY) */ +#include "exec/cpu_ldst.h" /* Secure Virtual Machine helpers */ diff --git a/target-i386/translate.c b/target-i386/translate.c index 3aa52eb..2359787 100644 --- a/target-i386/translate.c +++ b/target-i386/translate.c @@ -27,6 +27,7 @@ #include "cpu.h" #include "disas/disas.h" #include "tcg-op.h" +#include "exec/cpu_ldst.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" -- cgit v1.1 From 43773ed369a2e1f6c347e30c74df27a8750d1d2d Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 11:28:38 +0100 Subject: target-i386: rename KSMAP to KNOSMAP This is the mode where SMAP is overridden, put "NO" in its name. Signed-off-by: Paolo Bonzini --- target-i386/cpu.h | 6 +++--- target-i386/helper.c | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'target-i386') diff --git a/target-i386/cpu.h b/target-i386/cpu.h index e9cbdab..f88b675 100644 --- a/target-i386/cpu.h +++ b/target-i386/cpu.h @@ -1155,15 +1155,15 @@ static inline CPUX86State *cpu_init(const char *cpu_model) /* MMU modes definitions */ #define MMU_MODE0_SUFFIX _kernel #define MMU_MODE1_SUFFIX _user -#define MMU_MODE2_SUFFIX _ksmap /* Kernel with SMAP override */ +#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */ #define MMU_KERNEL_IDX 0 #define MMU_USER_IDX 1 -#define MMU_KSMAP_IDX 2 +#define MMU_KNOSMAP_IDX 2 static inline int cpu_mmu_index (CPUX86State *env) { return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : ((env->hflags & HF_SMAP_MASK) && (env->eflags & AC_MASK)) - ? MMU_KSMAP_IDX : MMU_KERNEL_IDX; + ? 
MMU_KNOSMAP_IDX : MMU_KERNEL_IDX; } #define CC_DST (env->cc_dst) diff --git a/target-i386/helper.c b/target-i386/helper.c index 46d20e4..88bbe65 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -654,7 +654,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, goto do_fault_protect; } /* fall through */ - case MMU_KSMAP_IDX: + case MMU_KNOSMAP_IDX: if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)) { goto do_fault_protect; @@ -716,7 +716,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, goto do_fault_protect; } /* fall through */ - case MMU_KSMAP_IDX: + case MMU_KNOSMAP_IDX: if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)) { goto do_fault_protect; @@ -771,7 +771,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, goto do_fault_protect; } /* fall through */ - case MMU_KSMAP_IDX: + case MMU_KNOSMAP_IDX: if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && (pde & PG_USER_MASK)) { goto do_fault_protect; @@ -828,7 +828,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, goto do_fault_protect; } /* fall through */ - case MMU_KSMAP_IDX: + case MMU_KNOSMAP_IDX: if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)) { goto do_fault_protect; -- cgit v1.1 From 81cf8d8adc64203567e03326c13ea4abec9fe5df Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 18:47:57 +0100 Subject: target-i386: move check_io helpers to seg_helper.c Prepare for adding _kernel accessors there in the next patch. Signed-off-by: Paolo Bonzini --- target-i386/misc_helper.c | 42 ------------------------------------------ target-i386/seg_helper.c | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 42 deletions(-) (limited to 'target-i386') diff --git a/target-i386/misc_helper.c b/target-i386/misc_helper.c index 4f1e30f..4aaf1e4 100644 --- a/target-i386/misc_helper.c +++ b/target-i386/misc_helper.c @@ -22,48 +22,6 @@ #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" -/* check if Port I/O is allowed in TSS */ -static inline void check_io(CPUX86State *env, int addr, int size) -{ - int io_offset, val, mask; - - /* TSS must be a valid 32 bit one */ - if (!(env->tr.flags & DESC_P_MASK) || - ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || - env->tr.limit < 103) { - goto fail; - } - io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66); - io_offset += (addr >> 3); - /* Note: the check needs two bytes */ - if ((io_offset + 1) > env->tr.limit) { - goto fail; - } - val = cpu_lduw_kernel(env, env->tr.base + io_offset); - val >>= (addr & 7); - mask = (1 << size) - 1; - /* all bits must be zero to allow the I/O */ - if ((val & mask) != 0) { - fail: - raise_exception_err(env, EXCP0D_GPF, 0); - } -} - -void helper_check_iob(CPUX86State *env, uint32_t t0) -{ - check_io(env, t0, 1); -} - -void helper_check_iow(CPUX86State *env, uint32_t t0) -{ - check_io(env, t0, 2); -} - -void helper_check_iol(CPUX86State *env, uint32_t t0) -{ - check_io(env, t0, 4); -} - void helper_outb(uint32_t port, uint32_t data) { cpu_outb(port, data & 0xff); diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c index ba47215..31c5508 100644 --- a/target-i386/seg_helper.c +++ b/target-i386/seg_helper.c @@ -2469,3 +2469,45 @@ void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector) } } #endif + +/* check if Port I/O is allowed in TSS */ +static inline void check_io(CPUX86State *env, int addr, int size) +{ + int io_offset, val, mask; + + /* TSS must be a valid 32 bit one */ + if 
(!(env->tr.flags & DESC_P_MASK) || + ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || + env->tr.limit < 103) { + goto fail; + } + io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66); + io_offset += (addr >> 3); + /* Note: the check needs two bytes */ + if ((io_offset + 1) > env->tr.limit) { + goto fail; + } + val = cpu_lduw_kernel(env, env->tr.base + io_offset); + val >>= (addr & 7); + mask = (1 << size) - 1; + /* all bits must be zero to allow the I/O */ + if ((val & mask) != 0) { + fail: + raise_exception_err(env, EXCP0D_GPF, 0); + } +} + +void helper_check_iob(CPUX86State *env, uint32_t t0) +{ + check_io(env, t0, 1); +} + +void helper_check_iow(CPUX86State *env, uint32_t t0) +{ + check_io(env, t0, 2); +} + +void helper_check_iol(CPUX86State *env, uint32_t t0) +{ + check_io(env, t0, 4); +} -- cgit v1.1 From 8a201bd47e4724c5783033aedbdd126a5df7a251 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 11:43:45 +0100 Subject: target-i386: fix kernel accesses with SMAP and CPL = 3 With SMAP, implicit kernel accesses from user mode always behave as if AC=0. To do this, kernel mode is not anymore a separate MMU mode. Instead, KERNEL_IDX is renamed to KSMAP_IDX and the kernel mode accessors wrap KSMAP_IDX and KNOSMAP_IDX. Signed-off-by: Paolo Bonzini --- target-i386/cpu.h | 15 +++++++++++---- target-i386/helper.c | 8 ++++---- target-i386/seg_helper.c | 18 ++++++++++++++++++ 3 files changed, 33 insertions(+), 8 deletions(-) (limited to 'target-i386') diff --git a/target-i386/cpu.h b/target-i386/cpu.h index f88b675..b80df66 100644 --- a/target-i386/cpu.h +++ b/target-i386/cpu.h @@ -1153,17 +1153,24 @@ static inline CPUX86State *cpu_init(const char *cpu_model) #define cpudef_setup x86_cpudef_setup /* MMU modes definitions */ -#define MMU_MODE0_SUFFIX _kernel +#define MMU_MODE0_SUFFIX _ksmap #define MMU_MODE1_SUFFIX _user #define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */ -#define MMU_KERNEL_IDX 0 +#define MMU_KSMAP_IDX 0 #define MMU_USER_IDX 1 #define MMU_KNOSMAP_IDX 2 -static inline int cpu_mmu_index (CPUX86State *env) +static inline int cpu_mmu_index(CPUX86State *env) { return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : ((env->hflags & HF_SMAP_MASK) && (env->eflags & AC_MASK)) - ? MMU_KNOSMAP_IDX : MMU_KERNEL_IDX; + ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; +} + +static inline int cpu_mmu_index_kernel(CPUX86State *env) +{ + return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX : + ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) + ? 
MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; } #define CC_DST (env->cc_dst) diff --git a/target-i386/helper.c b/target-i386/helper.c index 88bbe65..6d8e350 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -648,7 +648,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, } break; - case MMU_KERNEL_IDX: + case MMU_KSMAP_IDX: if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) && (ptep & PG_USER_MASK)) { goto do_fault_protect; @@ -710,7 +710,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, } break; - case MMU_KERNEL_IDX: + case MMU_KSMAP_IDX: if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) && (ptep & PG_USER_MASK)) { goto do_fault_protect; @@ -765,7 +765,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, } break; - case MMU_KERNEL_IDX: + case MMU_KSMAP_IDX: if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) && (pde & PG_USER_MASK)) { goto do_fault_protect; @@ -822,7 +822,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, } break; - case MMU_KERNEL_IDX: + case MMU_KSMAP_IDX: if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) && (ptep & PG_USER_MASK)) { goto do_fault_protect; diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c index 31c5508..934cc2b 100644 --- a/target-i386/seg_helper.c +++ b/target-i386/seg_helper.c @@ -34,6 +34,24 @@ # define LOG_PCALL_STATE(cpu) do { } while (0) #endif +#ifndef CONFIG_USER_ONLY +#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env)) +#define MEMSUFFIX _kernel +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#endif + /* return non zero if error */ static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr, uint32_t *e2_ptr, int selector) -- cgit v1.1 From f57584dc874f0ba92403b4ade631c232564fb027 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 11:49:20 +0100 Subject: target-i386: simplify SMAP handling in MMU_KSMAP_IDX Do not use this MMU index at all if CR4.SMAP is false, and drop the SMAP check from x86_cpu_handle_mmu_fault. Signed-off-by: Paolo Bonzini --- target-i386/cpu.h | 2 +- target-i386/helper.c | 12 ++++-------- 2 files changed, 5 insertions(+), 9 deletions(-) (limited to 'target-i386') diff --git a/target-i386/cpu.h b/target-i386/cpu.h index b80df66..f2d5b19 100644 --- a/target-i386/cpu.h +++ b/target-i386/cpu.h @@ -1162,7 +1162,7 @@ static inline CPUX86State *cpu_init(const char *cpu_model) static inline int cpu_mmu_index(CPUX86State *env) { return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : - ((env->hflags & HF_SMAP_MASK) && (env->eflags & AC_MASK)) + (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK)) ? 
MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; } diff --git a/target-i386/helper.c b/target-i386/helper.c index 6d8e350..d10de26 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -649,8 +649,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, break; case MMU_KSMAP_IDX: - if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) && - (ptep & PG_USER_MASK)) { + if (is_write1 != 2 && (ptep & PG_USER_MASK)) { goto do_fault_protect; } /* fall through */ @@ -711,8 +710,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, break; case MMU_KSMAP_IDX: - if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) && - (ptep & PG_USER_MASK)) { + if (is_write1 != 2 && (ptep & PG_USER_MASK)) { goto do_fault_protect; } /* fall through */ @@ -766,8 +764,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, break; case MMU_KSMAP_IDX: - if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) && - (pde & PG_USER_MASK)) { + if (is_write1 != 2 && (pde & PG_USER_MASK)) { goto do_fault_protect; } /* fall through */ @@ -823,8 +820,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, break; case MMU_KSMAP_IDX: - if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) && - (ptep & PG_USER_MASK)) { + if (is_write1 != 2 && (ptep & PG_USER_MASK)) { goto do_fault_protect; } /* fall through */ -- cgit v1.1 From 843408b3cf0a8891b2cbe9e775cc7c6a376fd6c4 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 4 Apr 2014 08:19:55 +0200 Subject: target-i386: fix coding standards in x86_cpu_handle_mmu_fault Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index d10de26..7b3819b 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -670,8 +670,9 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, is_dirty = is_write && !(pde & PG_DIRTY_MASK); if (!(pde & PG_ACCESSED_MASK) || is_dirty) { pde |= PG_ACCESSED_MASK; - if (is_dirty) + if (is_dirty) { pde |= PG_DIRTY_MASK; + } stl_phys_notdirty(cs->as, pde_addr, pde); } /* align to page_size */ @@ -731,8 +732,9 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, is_dirty = is_write && !(pte & PG_DIRTY_MASK); if (!(pte & PG_ACCESSED_MASK) || is_dirty) { pte |= PG_ACCESSED_MASK; - if (is_dirty) + if (is_dirty) { pte |= PG_DIRTY_MASK; + } stl_phys_notdirty(cs->as, pte_addr, pte); } page_size = 4096; @@ -785,12 +787,13 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, is_dirty = is_write && !(pde & PG_DIRTY_MASK); if (!(pde & PG_ACCESSED_MASK) || is_dirty) { pde |= PG_ACCESSED_MASK; - if (is_dirty) + if (is_dirty) { pde |= PG_DIRTY_MASK; + } stl_phys_notdirty(cs->as, pde_addr, pde); } - pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */ + pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */ ptep = pte; virt_addr = addr & ~(page_size - 1); } else { @@ -841,8 +844,9 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, is_dirty = is_write && !(pte & PG_DIRTY_MASK); if (!(pte & PG_ACCESSED_MASK) || is_dirty) { pte |= PG_ACCESSED_MASK; - if (is_dirty) + if (is_dirty) { pte |= PG_DIRTY_MASK; + } stl_phys_notdirty(cs->as, pte_addr, pte); } page_size = 4096; -- cgit v1.1 From 00cc3e1d70105ae1da11aee901b9af0c546bce4c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 4 Apr 2014 07:51:29 +0200 Subject: target-i386: commonize checks for 2MB and 4KB pages Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 127 ++++++++++++++++++--------------------------------- 1 file 
changed, 44 insertions(+), 83 deletions(-) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index 7b3819b..cf18336 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -634,50 +634,8 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, if (pde & PG_PSE_MASK) { /* 2 MB page */ page_size = 2048 * 1024; - ptep ^= PG_NX_MASK; - if ((ptep & PG_NX_MASK) && is_write1 == 2) { - goto do_fault_protect; - } - switch (mmu_idx) { - case MMU_USER_IDX: - if (!(ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if (is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; - - case MMU_KSMAP_IDX: - if (is_write1 != 2 && (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - /* fall through */ - case MMU_KNOSMAP_IDX: - if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && - (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if ((env->cr[0] & CR0_WP_MASK) && - is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; - - default: /* cannot happen */ - break; - } - is_dirty = is_write && !(pde & PG_DIRTY_MASK); - if (!(pde & PG_ACCESSED_MASK) || is_dirty) { - pde |= PG_ACCESSED_MASK; - if (is_dirty) { - pde |= PG_DIRTY_MASK; - } - stl_phys_notdirty(cs->as, pde_addr, pde); - } - /* align to page_size */ - pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); - virt_addr = addr & ~(page_size - 1); + pte_addr = pde_addr; + pte = pde; } else { /* 4 KB page */ if (!(pde & PG_ACCESSED_MASK)) { @@ -697,50 +655,53 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, } /* combine pde and pte nx, user and rw protections */ ptep &= pte ^ PG_NX_MASK; - ptep ^= PG_NX_MASK; - if ((ptep & PG_NX_MASK) && is_write1 == 2) - goto do_fault_protect; - switch (mmu_idx) { - case MMU_USER_IDX: - if (!(ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if (is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; + page_size = 4096; + } - case MMU_KSMAP_IDX: - if (is_write1 != 2 && (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - /* fall through */ - case MMU_KNOSMAP_IDX: - if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && - (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if ((env->cr[0] & CR0_WP_MASK) && - is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; + ptep ^= PG_NX_MASK; + if ((ptep & PG_NX_MASK) && is_write1 == 2) { + goto do_fault_protect; + } + switch (mmu_idx) { + case MMU_USER_IDX: + if (!(ptep & PG_USER_MASK)) { + goto do_fault_protect; + } + if (is_write && !(ptep & PG_RW_MASK)) { + goto do_fault_protect; + } + break; - default: /* cannot happen */ - break; + case MMU_KSMAP_IDX: + if (is_write1 != 2 && (ptep & PG_USER_MASK)) { + goto do_fault_protect; } - is_dirty = is_write && !(pte & PG_DIRTY_MASK); - if (!(pte & PG_ACCESSED_MASK) || is_dirty) { - pte |= PG_ACCESSED_MASK; - if (is_dirty) { - pte |= PG_DIRTY_MASK; - } - stl_phys_notdirty(cs->as, pte_addr, pte); + /* fall through */ + case MMU_KNOSMAP_IDX: + if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && + (ptep & PG_USER_MASK)) { + goto do_fault_protect; } - page_size = 4096; - virt_addr = addr & ~0xfff; - pte = pte & (PHYS_ADDR_MASK | 0xfff); + if ((env->cr[0] & CR0_WP_MASK) && + is_write && !(ptep & PG_RW_MASK)) { + goto do_fault_protect; + } + break; + + default: /* cannot happen */ + break; + } + is_dirty = is_write && !(pte & PG_DIRTY_MASK); + if (!(pte & PG_ACCESSED_MASK) || is_dirty) { + pte |= PG_ACCESSED_MASK; + if (is_dirty) { + pte |= PG_DIRTY_MASK; + } + stl_phys_notdirty(cs->as, 
pte_addr, pte); } + /* align to page_size */ + pte &= ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); + virt_addr = addr & ~(page_size - 1); } else { uint32_t pde; -- cgit v1.1 From 487cad8853c5c412d49a111c9a26aa09f0c396ad Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 4 Apr 2014 07:57:02 +0200 Subject: target-i386: commonize checks for 4MB and 4KB pages Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 118 ++++++++++++++++++--------------------------------- 1 file changed, 41 insertions(+), 77 deletions(-) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index cf18336..5842531 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -716,47 +716,9 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, /* if PSE bit is set, then we use a 4MB page */ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { page_size = 4096 * 1024; - switch (mmu_idx) { - case MMU_USER_IDX: - if (!(pde & PG_USER_MASK)) { - goto do_fault_protect; - } - if (is_write && !(pde & PG_RW_MASK)) { - goto do_fault_protect; - } - break; - - case MMU_KSMAP_IDX: - if (is_write1 != 2 && (pde & PG_USER_MASK)) { - goto do_fault_protect; - } - /* fall through */ - case MMU_KNOSMAP_IDX: - if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && - (pde & PG_USER_MASK)) { - goto do_fault_protect; - } - if ((env->cr[0] & CR0_WP_MASK) && - is_write && !(pde & PG_RW_MASK)) { - goto do_fault_protect; - } - break; - - default: /* cannot happen */ - break; - } - is_dirty = is_write && !(pde & PG_DIRTY_MASK); - if (!(pde & PG_ACCESSED_MASK) || is_dirty) { - pde |= PG_ACCESSED_MASK; - if (is_dirty) { - pde |= PG_DIRTY_MASK; - } - stl_phys_notdirty(cs->as, pde_addr, pde); - } - - pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */ - ptep = pte; - virt_addr = addr & ~(page_size - 1); + ptep = pde; + pte_addr = pde_addr; + pte = pde; } else { if (!(pde & PG_ACCESSED_MASK)) { pde |= PG_ACCESSED_MASK; @@ -773,46 +735,48 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, } /* combine pde and pte user and rw protections */ ptep = pte & pde; - switch (mmu_idx) { - case MMU_USER_IDX: - if (!(ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if (is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; - - case MMU_KSMAP_IDX: - if (is_write1 != 2 && (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - /* fall through */ - case MMU_KNOSMAP_IDX: - if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && - (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if ((env->cr[0] & CR0_WP_MASK) && - is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; + page_size = 4096; + } + switch (mmu_idx) { + case MMU_USER_IDX: + if (!(ptep & PG_USER_MASK)) { + goto do_fault_protect; + } + if (is_write && !(ptep & PG_RW_MASK)) { + goto do_fault_protect; + } + break; - default: /* cannot happen */ - break; + case MMU_KSMAP_IDX: + if (is_write1 != 2 && (ptep & PG_USER_MASK)) { + goto do_fault_protect; } - is_dirty = is_write && !(pte & PG_DIRTY_MASK); - if (!(pte & PG_ACCESSED_MASK) || is_dirty) { - pte |= PG_ACCESSED_MASK; - if (is_dirty) { - pte |= PG_DIRTY_MASK; - } - stl_phys_notdirty(cs->as, pte_addr, pte); + /* fall through */ + case MMU_KNOSMAP_IDX: + if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && + (ptep & PG_USER_MASK)) { + goto do_fault_protect; } - page_size = 4096; - virt_addr = addr & ~0xfff; + if ((env->cr[0] & CR0_WP_MASK) && + is_write && !(ptep & PG_RW_MASK)) { + goto do_fault_protect; + } + break; + + default: /* cannot 
happen */ + break; + } + is_dirty = is_write && !(pte & PG_DIRTY_MASK); + if (!(pte & PG_ACCESSED_MASK) || is_dirty) { + pte |= PG_ACCESSED_MASK; + if (is_dirty) { + pte |= PG_DIRTY_MASK; + } + stl_phys_notdirty(cs->as, pte_addr, pte); } + /* align to page_size */ + pte &= ~((page_size - 1) & ~0xfff); + virt_addr = addr & ~(page_size - 1); } /* the page can be put in the TLB */ prot = PAGE_READ; -- cgit v1.1 From 7c8225600648fa0b56135547844f1e529350510a Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 4 Apr 2014 08:00:40 +0200 Subject: target-i386: commonize checks for PAE and non-PAE Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 120 ++++++++++++++++++--------------------------------- 1 file changed, 41 insertions(+), 79 deletions(-) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index 5842531..eae3e7e 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -659,49 +659,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, } ptep ^= PG_NX_MASK; - if ((ptep & PG_NX_MASK) && is_write1 == 2) { - goto do_fault_protect; - } - switch (mmu_idx) { - case MMU_USER_IDX: - if (!(ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if (is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; - - case MMU_KSMAP_IDX: - if (is_write1 != 2 && (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - /* fall through */ - case MMU_KNOSMAP_IDX: - if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && - (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if ((env->cr[0] & CR0_WP_MASK) && - is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; - - default: /* cannot happen */ - break; - } - is_dirty = is_write && !(pte & PG_DIRTY_MASK); - if (!(pte & PG_ACCESSED_MASK) || is_dirty) { - pte |= PG_ACCESSED_MASK; - if (is_dirty) { - pte |= PG_DIRTY_MASK; - } - stl_phys_notdirty(cs->as, pte_addr, pte); - } - /* align to page_size */ - pte &= ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); - virt_addr = addr & ~(page_size - 1); } else { uint32_t pde; @@ -737,47 +694,52 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, ptep = pte & pde; page_size = 4096; } - switch (mmu_idx) { - case MMU_USER_IDX: - if (!(ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if (is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; + } - case MMU_KSMAP_IDX: - if (is_write1 != 2 && (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - /* fall through */ - case MMU_KNOSMAP_IDX: - if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && - (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if ((env->cr[0] & CR0_WP_MASK) && - is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; + if ((ptep & PG_NX_MASK) && is_write1 == 2) { + goto do_fault_protect; + } + switch (mmu_idx) { + case MMU_USER_IDX: + if (!(ptep & PG_USER_MASK)) { + goto do_fault_protect; + } + if (is_write && !(ptep & PG_RW_MASK)) { + goto do_fault_protect; + } + break; - default: /* cannot happen */ - break; + case MMU_KSMAP_IDX: + if (is_write1 != 2 && (ptep & PG_USER_MASK)) { + goto do_fault_protect; } - is_dirty = is_write && !(pte & PG_DIRTY_MASK); - if (!(pte & PG_ACCESSED_MASK) || is_dirty) { - pte |= PG_ACCESSED_MASK; - if (is_dirty) { - pte |= PG_DIRTY_MASK; - } - stl_phys_notdirty(cs->as, pte_addr, pte); + /* fall through */ + case MMU_KNOSMAP_IDX: + if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && + (ptep & PG_USER_MASK)) { + goto do_fault_protect; + } + if ((env->cr[0] & CR0_WP_MASK) && + is_write && 
!(ptep & PG_RW_MASK)) { + goto do_fault_protect; + } + break; + + default: /* cannot happen */ + break; + } + is_dirty = is_write && !(pte & PG_DIRTY_MASK); + if (!(pte & PG_ACCESSED_MASK) || is_dirty) { + pte |= PG_ACCESSED_MASK; + if (is_dirty) { + pte |= PG_DIRTY_MASK; } - /* align to page_size */ - pte &= ~((page_size - 1) & ~0xfff); - virt_addr = addr & ~(page_size - 1); + stl_phys_notdirty(cs->as, pte_addr, pte); } + /* align to page_size */ + pte &= ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); + virt_addr = addr & ~(page_size - 1); + /* the page can be put in the TLB */ prot = PAGE_READ; if (!(ptep & PG_NX_MASK)) -- cgit v1.1 From 870a706735e8dc18c331bce0cdad2fe71c21ef2e Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 4 Apr 2014 08:04:18 +0200 Subject: target-i386: tweak handling of PG_NX_MASK Remove the tail of the PAE case, so that we can use "goto" in the next patch to jump to the protection checks. Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index eae3e7e..1460a23 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -657,8 +657,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, ptep &= pte ^ PG_NX_MASK; page_size = 4096; } - - ptep ^= PG_NX_MASK; } else { uint32_t pde; @@ -670,10 +668,11 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, error_code = 0; goto do_fault; } + ptep = pde | PG_NX_MASK; + /* if PSE bit is set, then we use a 4MB page */ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { page_size = 4096 * 1024; - ptep = pde; pte_addr = pde_addr; pte = pde; } else { @@ -691,11 +690,12 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, goto do_fault; } /* combine pde and pte user and rw protections */ - ptep = pte & pde; + ptep &= pte | PG_NX_MASK; page_size = 4096; } } + ptep ^= PG_NX_MASK; if ((ptep & PG_NX_MASK) && is_write1 == 2) { goto do_fault_protect; } -- cgit v1.1 From b052e4509bd4049c25890c56f603995c6763e761 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 4 Apr 2014 08:11:48 +0200 Subject: target-i386: introduce do_check_protect label This will help adding 1GB page support in the next patch. 
Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 74 +++++++++++++++++++++++++++------------------------- 1 file changed, 38 insertions(+), 36 deletions(-) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index 1460a23..d09e1c8 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -636,27 +636,27 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, page_size = 2048 * 1024; pte_addr = pde_addr; pte = pde; - } else { - /* 4 KB page */ - if (!(pde & PG_ACCESSED_MASK)) { - pde |= PG_ACCESSED_MASK; - stl_phys_notdirty(cs->as, pde_addr, pde); - } - pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) & - env->a20_mask; - pte = ldq_phys(cs->as, pte_addr); - if (!(pte & PG_PRESENT_MASK)) { - error_code = 0; - goto do_fault; - } - if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) { - error_code = PG_ERROR_RSVD_MASK; - goto do_fault; - } - /* combine pde and pte nx, user and rw protections */ - ptep &= pte ^ PG_NX_MASK; - page_size = 4096; + goto do_check_protect; + } + /* 4 KB page */ + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + stl_phys_notdirty(cs->as, pde_addr, pde); + } + pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) & + env->a20_mask; + pte = ldq_phys(cs->as, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + error_code = 0; + goto do_fault; + } + if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) { + error_code = PG_ERROR_RSVD_MASK; + goto do_fault; } + /* combine pde and pte nx, user and rw protections */ + ptep &= pte ^ PG_NX_MASK; + page_size = 4096; } else { uint32_t pde; @@ -675,26 +675,28 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, page_size = 4096 * 1024; pte_addr = pde_addr; pte = pde; - } else { - if (!(pde & PG_ACCESSED_MASK)) { - pde |= PG_ACCESSED_MASK; - stl_phys_notdirty(cs->as, pde_addr, pde); - } + goto do_check_protect; + } - /* page directory entry */ - pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & - env->a20_mask; - pte = ldl_phys(cs->as, pte_addr); - if (!(pte & PG_PRESENT_MASK)) { - error_code = 0; - goto do_fault; - } - /* combine pde and pte user and rw protections */ - ptep &= pte | PG_NX_MASK; - page_size = 4096; + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + stl_phys_notdirty(cs->as, pde_addr, pde); } + + /* page directory entry */ + pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & + env->a20_mask; + pte = ldl_phys(cs->as, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + error_code = 0; + goto do_fault; + } + /* combine pde and pte user and rw protections */ + ptep &= pte | PG_NX_MASK; + page_size = 4096; } +do_check_protect: ptep ^= PG_NX_MASK; if ((ptep & PG_NX_MASK) && is_write1 == 2) { goto do_fault_protect; -- cgit v1.1 From 77549a7809c3c9c53dd2573187324ba9d4bd3b42 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 4 Apr 2014 08:12:28 +0200 Subject: target-i386: introduce support for 1 GB pages Given the simplifications to the code in the previous patches, this is now very simple to do. 
Signed-off-by: Paolo Bonzini --- target-i386/cpu.c | 4 +--- target-i386/helper.c | 7 +++++++ 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'target-i386') diff --git a/target-i386/cpu.c b/target-i386/cpu.c index 042a48d..0f400d4 100644 --- a/target-i386/cpu.c +++ b/target-i386/cpu.c @@ -569,9 +569,7 @@ struct X86CPUDefinition { CPUID_EXT_RDRAND */ #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ - CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT) - /* missing: - CPUID_EXT2_PDPE1GB */ + CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB) #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) #define TCG_SVM_FEATURES 0 diff --git a/target-i386/helper.c b/target-i386/helper.c index d09e1c8..5a50364 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -605,6 +605,13 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, pdpe |= PG_ACCESSED_MASK; stl_phys_notdirty(cs->as, pdpe_addr, pdpe); } + if (pdpe & PG_PSE_MASK) { + /* 1 GB page */ + page_size = 1024 * 1024 * 1024; + pte_addr = pdpe_addr; + pte = pdpe; + goto do_check_protect; + } } else #endif { -- cgit v1.1 From c1eb2fa3fd5c811dd35d26f8b17551cc1171d8de Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 27 May 2014 12:16:08 +0200 Subject: target-i386: set correct error code for reserved bit access The correct error code is 9 (present, reserved), not 8. Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index 5a50364..a8e4088 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -530,7 +530,8 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, CPUX86State *env = &cpu->env; uint64_t ptep, pte; target_ulong pde_addr, pte_addr; - int error_code, is_dirty, prot, page_size, is_write, is_user; + int error_code = 0; + int is_dirty, prot, page_size, is_write, is_user; hwaddr paddr; uint32_t page_offset; target_ulong vaddr, virt_addr; @@ -577,12 +578,10 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, env->a20_mask; pml4e = ldq_phys(cs->as, pml4e_addr); if (!(pml4e & PG_PRESENT_MASK)) { - error_code = 0; goto do_fault; } if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) { - error_code = PG_ERROR_RSVD_MASK; - goto do_fault; + goto do_fault_rsvd; } if (!(pml4e & PG_ACCESSED_MASK)) { pml4e |= PG_ACCESSED_MASK; @@ -593,12 +592,10 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, env->a20_mask; pdpe = ldq_phys(cs->as, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { - error_code = 0; goto do_fault; } if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) { - error_code = PG_ERROR_RSVD_MASK; - goto do_fault; + goto do_fault_rsvd; } ptep &= pdpe ^ PG_NX_MASK; if (!(pdpe & PG_ACCESSED_MASK)) { @@ -620,7 +617,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, env->a20_mask; pdpe = ldq_phys(cs->as, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { - error_code = 0; goto do_fault; } ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; @@ -630,12 +626,10 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, env->a20_mask; pde = ldq_phys(cs->as, pde_addr); if (!(pde & PG_PRESENT_MASK)) { - error_code = 0; goto do_fault; } if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) { - error_code = PG_ERROR_RSVD_MASK; - goto do_fault; + goto do_fault_rsvd; } ptep &= pde ^ PG_NX_MASK; if 
(pde & PG_PSE_MASK) { @@ -654,12 +648,10 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, env->a20_mask; pte = ldq_phys(cs->as, pte_addr); if (!(pte & PG_PRESENT_MASK)) { - error_code = 0; goto do_fault; } if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) { - error_code = PG_ERROR_RSVD_MASK; - goto do_fault; + goto do_fault_rsvd; } /* combine pde and pte nx, user and rw protections */ ptep &= pte ^ PG_NX_MASK; @@ -672,7 +664,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, env->a20_mask; pde = ldl_phys(cs->as, pde_addr); if (!(pde & PG_PRESENT_MASK)) { - error_code = 0; goto do_fault; } ptep = pde | PG_NX_MASK; @@ -695,7 +686,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, env->a20_mask; pte = ldl_phys(cs->as, pte_addr); if (!(pte & PG_PRESENT_MASK)) { - error_code = 0; goto do_fault; } /* combine pde and pte user and rw protections */ @@ -776,8 +766,10 @@ do_check_protect: tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); return 0; + do_fault_rsvd: + error_code |= PG_ERROR_RSVD_MASK; do_fault_protect: - error_code = PG_ERROR_P_MASK; + error_code |= PG_ERROR_P_MASK; do_fault: error_code |= (is_write << PG_ERROR_W_BIT); if (is_user) -- cgit v1.1 From b728464ae823a220d3b698d2ce055b2ceec0e297 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 27 May 2014 12:39:23 +0200 Subject: target-i386: test reserved PS bit on PML4Es Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index a8e4088..787fbba 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -580,6 +580,9 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, if (!(pml4e & PG_PRESENT_MASK)) { goto do_fault; } + if (pml4e & PG_PSE_MASK) { + goto do_fault_rsvd; + } if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) { goto do_fault_rsvd; } -- cgit v1.1 From e8f6d00c30ed88910d0d985f4b2bf41654172ceb Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 27 May 2014 12:58:36 +0200 Subject: target-i386: raise page fault for reserved physical address bits Signed-off-by: Paolo Bonzini --- target-i386/cpu.h | 10 ++++++++++ target-i386/helper.c | 34 ++++++++++++++++++++++------------ 2 files changed, 32 insertions(+), 12 deletions(-) (limited to 'target-i386') diff --git a/target-i386/cpu.h b/target-i386/cpu.h index f2d5b19..8ceea8b 100644 --- a/target-i386/cpu.h +++ b/target-i386/cpu.h @@ -260,6 +260,8 @@ #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) #define PG_PSE_MASK (1 << PG_PSE_BIT) #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) +#define PG_ADDRESS_MASK 0x000ffffffffff000LL +#define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK) #define PG_HI_USER_MASK 0x7ff0000000000000LL #define PG_NX_MASK (1LL << PG_NX_BIT) @@ -1137,6 +1139,14 @@ uint64_t cpu_get_tsc(CPUX86State *env); #define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif +/* XXX: This value should match the one returned by CPUID + * and in exec.c */ +# if defined(TARGET_X86_64) +# define PHYS_ADDR_MASK 0xffffffffffLL +# else +# define PHYS_ADDR_MASK 0xfffffffffLL +# endif + static inline CPUX86State *cpu_init(const char *cpu_model) { X86CPU *cpu = cpu_x86_init(cpu_model); diff --git a/target-i386/helper.c b/target-i386/helper.c index 787fbba..c52eb5a 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -510,14 +510,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, #else -/* XXX: This value should match the one returned by CPUID - * and in exec.c */ -# if defined(TARGET_X86_64) -# 
define PHYS_ADDR_MASK 0xfffffff000LL -# else -# define PHYS_ADDR_MASK 0xffffff000LL -# endif - /* return value: * -1 = cannot handle fault * 0 = nothing more to do @@ -533,6 +525,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int error_code = 0; int is_dirty, prot, page_size, is_write, is_user; hwaddr paddr; + uint64_t rsvd_mask = PG_HI_RSVD_MASK; uint32_t page_offset; target_ulong vaddr, virt_addr; @@ -580,7 +573,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, if (!(pml4e & PG_PRESENT_MASK)) { goto do_fault; } - if (pml4e & PG_PSE_MASK) { + if (pml4e & (rsvd_mask | PG_PSE_MASK)) { goto do_fault_rsvd; } if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) { @@ -591,12 +584,15 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, stl_phys_notdirty(cs->as, pml4e_addr, pml4e); } ptep = pml4e ^ PG_NX_MASK; - pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) & + pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask; pdpe = ldq_phys(cs->as, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { goto do_fault; } + if (pdpe & rsvd_mask) { + goto do_fault_rsvd; + } if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) { goto do_fault_rsvd; } @@ -622,15 +618,22 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, if (!(pdpe & PG_PRESENT_MASK)) { goto do_fault; } + rsvd_mask |= PG_HI_USER_MASK | PG_NX_MASK; + if (pdpe & rsvd_mask) { + goto do_fault_rsvd; + } ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; } - pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) & + pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask; pde = ldq_phys(cs->as, pde_addr); if (!(pde & PG_PRESENT_MASK)) { goto do_fault; } + if (pde & rsvd_mask) { + goto do_fault_rsvd; + } if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) { goto do_fault_rsvd; } @@ -647,12 +650,15 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, pde |= PG_ACCESSED_MASK; stl_phys_notdirty(cs->as, pde_addr, pde); } - pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) & + pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask; pte = ldq_phys(cs->as, pte_addr); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } + if (pte & rsvd_mask) { + goto do_fault_rsvd; + } if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) { goto do_fault_rsvd; } @@ -694,9 +700,13 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, /* combine pde and pte user and rw protections */ ptep &= pte | PG_NX_MASK; page_size = 4096; + rsvd_mask = 0; } do_check_protect: + if (pte & rsvd_mask) { + goto do_fault_rsvd; + } ptep ^= PG_NX_MASK; if ((ptep & PG_NX_MASK) && is_write1 == 2) { goto do_fault_protect; -- cgit v1.1 From e7e898a76aa00e2238b119ed2910442b1c3cacdd Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 27 May 2014 14:44:49 +0200 Subject: target-i386: simplify pte/vaddr calculation They can moved to after the dirty bit processing, and unified between CR0.PG=1 and CR0.PG=0. 
Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index c52eb5a..153a91b 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -527,7 +527,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, hwaddr paddr; uint64_t rsvd_mask = PG_HI_RSVD_MASK; uint32_t page_offset; - target_ulong vaddr, virt_addr; + target_ulong vaddr; is_user = mmu_idx == MMU_USER_IDX; #if defined(DEBUG_MMU) @@ -544,7 +544,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, pte = (uint32_t)pte; } #endif - virt_addr = addr & TARGET_PAGE_MASK; prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; page_size = 4096; goto do_mapping; @@ -748,9 +747,6 @@ do_check_protect: } stl_phys_notdirty(cs->as, pte_addr, pte); } - /* align to page_size */ - pte &= ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); - virt_addr = addr & ~(page_size - 1); /* the page can be put in the TLB */ prot = PAGE_READ; @@ -771,11 +767,14 @@ do_check_protect: do_mapping: pte = pte & env->a20_mask; + /* align to page_size */ + pte &= PG_ADDRESS_MASK & ~(page_size - 1); + /* Even if 4MB pages, we map only one 4KB page in the cache to avoid filling it too fast */ - page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); - paddr = (pte & TARGET_PAGE_MASK) + page_offset; - vaddr = virt_addr + page_offset; + vaddr = addr & TARGET_PAGE_MASK; + page_offset = vaddr & (page_size - 1); + paddr = pte + page_offset; tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); return 0; -- cgit v1.1 From e2a32ebbfe899a32a6b063f0f9e7c2593267ea88 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 27 May 2014 13:58:46 +0200 Subject: target-i386: unify reserved bits and NX bit check Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index 153a91b..a2e8bd1 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -549,6 +549,10 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, goto do_mapping; } + if (!(env->efer & MSR_EFER_NXE)) { + rsvd_mask |= PG_NX_MASK; + } + if (env->cr[4] & CR4_PAE_MASK) { uint64_t pde, pdpe; target_ulong pdpe_addr; @@ -575,9 +579,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, if (pml4e & (rsvd_mask | PG_PSE_MASK)) { goto do_fault_rsvd; } - if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) { - goto do_fault_rsvd; - } if (!(pml4e & PG_ACCESSED_MASK)) { pml4e |= PG_ACCESSED_MASK; stl_phys_notdirty(cs->as, pml4e_addr, pml4e); @@ -592,9 +593,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, if (pdpe & rsvd_mask) { goto do_fault_rsvd; } - if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) { - goto do_fault_rsvd; - } ptep &= pdpe ^ PG_NX_MASK; if (!(pdpe & PG_ACCESSED_MASK)) { pdpe |= PG_ACCESSED_MASK; @@ -633,9 +631,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, if (pde & rsvd_mask) { goto do_fault_rsvd; } - if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) { - goto do_fault_rsvd; - } ptep &= pde ^ PG_NX_MASK; if (pde & PG_PSE_MASK) { /* 2 MB page */ @@ -658,9 +653,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, if (pte & rsvd_mask) { goto do_fault_rsvd; } - if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) { - goto do_fault_rsvd; - } /* combine pde and pte nx, user and rw protections */ ptep &= pte ^ PG_NX_MASK; page_size = 4096; -- cgit v1.1 
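The reserved-bit patches above hinge on one trick: when EFER.NXE is clear, the NX bit is itself a reserved bit, so folding PG_NX_MASK into rsvd_mask once lets a single "entry & rsvd_mask" test cover both the high physical-address bits and the NX check at every paging level. The following is a minimal standalone C sketch of that idea, not QEMU code; the helper name and the efer_nxe parameter are made up, while the constants mirror the TARGET_X86_64 definitions added earlier in this series.

/*
 * Standalone sketch (not QEMU code) of the unified reserved-bit/NX check:
 * when EFER.NXE is clear, bit 63 is just another reserved bit, so it is
 * folded into rsvd_mask once and every paging-structure entry needs only
 * one "entry & rsvd_mask" test instead of a separate NX check per level.
 * Constants mirror the TARGET_X86_64 values used in target-i386/cpu.h.
 */
#include <stdbool.h>
#include <stdint.h>

#define PG_NX_MASK       (1ULL << 63)
#define PG_ADDRESS_MASK  0x000ffffffffff000ULL
#define PHYS_ADDR_MASK   0xffffffffffULL   /* 40 implemented physical bits */
#define PG_HI_RSVD_MASK  (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)

/* Hypothetical helper: true if the entry should fault with P|RSVD set. */
static bool entry_has_reserved_bits(uint64_t entry, bool efer_nxe)
{
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;

    if (!efer_nxe) {
        rsvd_mask |= PG_NX_MASK;    /* NX is reserved when NXE = 0 */
    }
    return (entry & rsvd_mask) != 0;
}
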
From eaad03e47206882229d184c83488142cba243917 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 27 May 2014 13:03:17 +0200 Subject: target-i386: raise page fault for reserved bits in large pages In large pages, bit 12 is for PAT, but bits starting at 13 are reserved. Signed-off-by: Paolo Bonzini --- target-i386/cpu.h | 2 ++ target-i386/helper.c | 1 + 2 files changed, 3 insertions(+) (limited to 'target-i386') diff --git a/target-i386/cpu.h b/target-i386/cpu.h index 8ceea8b..51959be 100644 --- a/target-i386/cpu.h +++ b/target-i386/cpu.h @@ -249,6 +249,7 @@ #define PG_DIRTY_BIT 6 #define PG_PSE_BIT 7 #define PG_GLOBAL_BIT 8 +#define PG_PSE_PAT_BIT 12 #define PG_NX_BIT 63 #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) @@ -260,6 +261,7 @@ #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) #define PG_PSE_MASK (1 << PG_PSE_BIT) #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) +#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT) #define PG_ADDRESS_MASK 0x000ffffffffff000LL #define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK) #define PG_HI_USER_MASK 0x7ff0000000000000LL diff --git a/target-i386/helper.c b/target-i386/helper.c index a2e8bd1..94081e8 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -695,6 +695,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, } do_check_protect: + rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; if (pte & rsvd_mask) { goto do_fault_rsvd; } -- cgit v1.1 From de431a655a7560d834e1187d6b30cb6b1946e90c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 27 May 2014 12:31:28 +0200 Subject: target-i386: support long addresses for 4MB pages (PSE-36) 4MB pages can use 40-bit addresses by putting the higher 8 bits in bits 20-13 of the PDE. Bit 21 is reserved. Signed-off-by: Paolo Bonzini --- target-i386/cpu.c | 3 +-- target-i386/helper.c | 12 +++++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'target-i386') diff --git a/target-i386/cpu.c b/target-i386/cpu.c index 0f400d4..c8ef936 100644 --- a/target-i386/cpu.c +++ b/target-i386/cpu.c @@ -552,8 +552,7 @@ struct X86CPUDefinition { CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS) /* partly implemented: - CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) - CPUID_PSE36 (needed for Solaris) */ + CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ /* missing: CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ diff --git a/target-i386/helper.c b/target-i386/helper.c index 94081e8..2b917ad 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -672,8 +672,13 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { page_size = 4096 * 1024; pte_addr = pde_addr; - pte = pde; - goto do_check_protect; + + /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. + * Leave bits 20-13 in place for setting accessed/dirty bits below. 
+ */ + pte = pde | ((pde & 0x1fe000) << (32 - 13)); + rsvd_mask = 0x200000; + goto do_check_protect_pse36; } if (!(pde & PG_ACCESSED_MASK)) { @@ -696,6 +701,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, do_check_protect: rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; +do_check_protect_pse36: if (pte & rsvd_mask) { goto do_fault_rsvd; } @@ -882,7 +888,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) if (!(pde & PG_PRESENT_MASK)) return -1; if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { - pte = pde & ~0x003ff000; /* align to 4MB */ + pte = pde | ((pde & 0x1fe000) << (32 - 13)); page_size = 4096 * 1024; } else { /* page directory entry */ -- cgit v1.1 From b09481de91cce94342bac3327bb7633c39ff8bf6 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 27 May 2014 13:24:54 +0200 Subject: target-i386: fix protection bits in the TLB for SMEP User pages must be marked as non-executable when running under SMEP; otherwise, fetching the page first and then calling it will fail. With this patch, all SMEP testcases in kvm-unit-tests now pass. Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index 2b917ad..04beaeb 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -749,8 +749,10 @@ do_check_protect_pse36: /* the page can be put in the TLB */ prot = PAGE_READ; - if (!(ptep & PG_NX_MASK)) + if (!(ptep & PG_NX_MASK) && + !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK))) { prot |= PAGE_EXEC; + } if (pte & PG_DIRTY_MASK) { /* only set write access if already dirty... otherwise wait for dirty access */ -- cgit v1.1 From 16b96f82cdfcb185560c2f8ebfc731711e2ccb2d Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 27 May 2014 14:58:47 +0200 Subject: target-i386: cleanup x86_cpu_get_phys_page_debug Make the code a bit more similar to x86_cpu_handle_mmu_fault. 
Signed-off-by: Paolo Bonzini --- target-i386/helper.c | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) (limited to 'target-i386') diff --git a/target-i386/helper.c b/target-i386/helper.c index 04beaeb..11ca864 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -811,7 +811,6 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) CPUX86State *env = &cpu->env; target_ulong pde_addr, pte_addr; uint64_t pte; - hwaddr paddr; uint32_t page_offset; int page_size; @@ -829,25 +828,24 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) /* test virtual address sign extension */ sext = (int64_t)addr >> 47; - if (sext != 0 && sext != -1) + if (sext != 0 && sext != -1) { return -1; - + } pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask; pml4e = ldq_phys(cs->as, pml4e_addr); - if (!(pml4e & PG_PRESENT_MASK)) + if (!(pml4e & PG_PRESENT_MASK)) { return -1; - - pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) + + } + pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask; pdpe = ldq_phys(cs->as, pdpe_addr); - if (!(pdpe & PG_PRESENT_MASK)) + if (!(pdpe & PG_PRESENT_MASK)) { return -1; - + } if (pdpe & PG_PSE_MASK) { page_size = 1024 * 1024 * 1024; - pte = pdpe & ~( (page_size - 1) & ~0xfff); - pte &= ~(PG_NX_MASK | PG_HI_USER_MASK); + pte = pdpe; goto out; } @@ -861,7 +859,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) return -1; } - pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) + + pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask; pde = ldq_phys(cs->as, pde_addr); if (!(pde & PG_PRESENT_MASK)) { @@ -870,17 +868,17 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) if (pde & PG_PSE_MASK) { /* 2 MB page */ page_size = 2048 * 1024; - pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */ + pte = pde; } else { /* 4 KB page */ - pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) + + pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask; page_size = 4096; pte = ldq_phys(cs->as, pte_addr); } - pte &= ~(PG_NX_MASK | PG_HI_USER_MASK); - if (!(pte & PG_PRESENT_MASK)) + if (!(pte & PG_PRESENT_MASK)) { return -1; + } } else { uint32_t pde; @@ -896,8 +894,9 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) /* page directory entry */ pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask; pte = ldl_phys(cs->as, pte_addr); - if (!(pte & PG_PRESENT_MASK)) + if (!(pte & PG_PRESENT_MASK)) { return -1; + } page_size = 4096; } pte = pte & env->a20_mask; @@ -906,9 +905,9 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) #ifdef TARGET_X86_64 out: #endif + pte &= PG_ADDRESS_MASK & ~(page_size - 1); page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); - paddr = (pte & TARGET_PAGE_MASK) + page_offset; - return paddr; + return pte | page_offset; } void hw_breakpoint_insert(CPUX86State *env, int index) -- cgit v1.1
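
The least obvious arithmetic in this series is the PSE-36 composition from "target-i386: support long addresses for 4MB pages (PSE-36)": bits 31-22 of a 4 MB PDE give physical-address bits 31-22, bits 20-13 give bits 39-32, and bit 21 is reserved. Below is a standalone C sketch of that translation, not QEMU code; the helper name, macro names, and the example entry in main() are made up for illustration, while the 0x1fe000 mask and the (32 - 13) shift mirror the expression used in the patch.

/*
 * Standalone sketch (not QEMU code) of the PSE-36 4 MB page translation:
 * PDE bits 31-22 supply physical-address bits 31-22, PDE bits 20-13 supply
 * physical-address bits 39-32, and PDE bit 21 must be zero.  The helper
 * name and the example entry in main() are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PSE36_HI_MASK    0x001fe000u   /* PDE bits 20-13 -> PA bits 39-32 */
#define PSE36_RSVD_MASK  0x00200000u   /* PDE bit 21 is reserved */
#define PDE_4M_FRAME     0xffc00000u   /* PDE bits 31-22 -> PA bits 31-22 */
#define PAGE_4M_OFFSET   0x003fffffu   /* offset within a 4 MB page */

/* Returns the 40-bit physical address, or -1 if a reserved bit is set. */
static int64_t pse36_translate(uint32_t pde, uint32_t vaddr)
{
    if (pde & PSE36_RSVD_MASK) {
        return -1;                     /* would raise #PF with RSVD set */
    }
    uint64_t frame = (pde & PDE_4M_FRAME)
                     | ((uint64_t)(pde & PSE36_HI_MASK) << (32 - 13));
    return frame + (vaddr & PAGE_4M_OFFSET);
}

int main(void)
{
    /* PDE with PA bits 39-32 = 0x5a, PA bits 31-22 = 0x123, P|RW|PS set. */
    uint32_t pde = (0x123u << 22) | (0x5au << 13) | 0x83u;

    /* Prints 0x5a48f45678: frame 0x5a48c00000 plus page offset 0x345678. */
    printf("%#llx\n", (unsigned long long)pse36_translate(pde, 0x00345678));
    return 0;
}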