author      Paolo Bonzini <pbonzini@redhat.com>    2024-06-11 22:04:56 +0200
committer   Paolo Bonzini <pbonzini@redhat.com>    2024-07-16 18:18:24 +0200
commit      0bd385e7e3c33e987d7a8879918be6df7b111ac4 (patch)
tree        ec66fef17e5abf5c6f38c60ef820bf2472dc08bd /target
parent      a7cf4949938743e9ecb73efcb51d27bd18d3c3fa (diff)
target/i386/tcg: Allow IRET from user mode to user mode with SMAP
This fixes a bug wherein i386/tcg assumed an interrupt return using
the IRET instruction was always returning from kernel mode to either
kernel mode or user mode. This assumption is violated when IRET is used
as a clever way to restore thread state, as for example in the dotnet
runtime. There, IRET returns from user mode to user mode.
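For illustration only (this snippet is not part of the commit): a minimal
user-mode sketch of that pattern on x86-64 Linux with GCC, where IRETQ is
executed at CPL 3 to atomically reload RIP, RFLAGS and RSP, much as a
runtime does when resuming a saved thread context.  Every name below
(switch_with_iret, resumed, alt_stack) is hypothetical and the dotnet
runtime's real code is different; the point is only that an IRET from
user mode to user mode is something a guest can legitimately do.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* 64 KiB replacement stack, 16-byte aligned. */
static char alt_stack[64 * 1024] __attribute__((aligned(16)));

/* Runs on the new stack after the IRET; never returns. */
static void resumed(void)
{
    puts("resumed via IRETQ, still at CPL 3");
    exit(0);
}

/*
 * Build an IRETQ frame (SS, RSP, RFLAGS, CS, RIP) on the current stack
 * and execute IRETQ.  The saved CS has RPL 3 and we are already at CPL 3,
 * so this is an interrupt return from user mode to user mode.
 */
static void switch_with_iret(void (*entry)(void), void *new_sp)
{
    uint64_t cs, ss, rflags;

    __asm__ volatile("mov %%cs, %0" : "=r"(cs));
    __asm__ volatile("mov %%ss, %0" : "=r"(ss));
    __asm__ volatile("pushfq; popq %0" : "=r"(rflags));

    __asm__ volatile("pushq %0\n\t"   /* SS                        */
                     "pushq %1\n\t"   /* RSP: the new stack        */
                     "pushq %2\n\t"   /* RFLAGS                    */
                     "pushq %3\n\t"   /* CS                        */
                     "pushq %4\n\t"   /* RIP: the new entry point  */
                     "iretq"
                     : : "r"(ss), "r"(new_sp), "r"(rflags), "r"(cs), "r"(entry)
                     : "memory");
    __builtin_unreachable();
}

int main(void)
{
    /* -8 so RSP at entry of resumed() looks like it does after a CALL. */
    switch_with_iret(resumed, alt_stack + sizeof(alt_stack) - 8);
}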
The fix is that stack accesses from IRET and RETF, as well as accesses
to the parameters in a call gate, are now normal data accesses using the
current CPL.  Previously they were performed as kernel accesses; with
SMAP enabled, the kernel-privileged access to the user-mode stack
manifested as a page fault in the guest Linux kernel.
This bug appears to have been in QEMU since the beginning.
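Also for illustration (this is not QEMU's MMU code): a toy model of why
the old helpers faulted.  The cpu_ld*_kernel_ra() accessors translate the
guest address with kernel privilege, so with CR4.SMAP set a load from the
user-mode stack is rejected even though the guest is executing at CPL 3,
whereas the cpu_ld*_data_ra() accessors translate with the current CPL.
The struct and function below are hypothetical simplifications and ignore
EFLAGS.AC and QEMU's real MMU-index selection.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, heavily simplified guest state. */
struct toy_env {
    int  cpl;       /* current privilege level of the guest */
    bool cr4_smap;  /* CR4.SMAP enabled                     */
};

/* Does a load from a user-accessible page (PTE.U/S = 1) fault? */
static bool load_from_user_page_faults(const struct toy_env *env,
                                       bool kernel_style_access)
{
    /* Old code: always translate as privilege 0; new code: use the CPL. */
    int effective_priv = kernel_style_access ? 0 : env->cpl;

    /* SMAP rejects supervisor data accesses to user pages. */
    return env->cr4_smap && effective_priv < 3;
}

int main(void)
{
    /* User-mode IRET on a SMAP-enabled guest, as in the bug report. */
    struct toy_env env = { .cpl = 3, .cr4_smap = true };

    printf("kernel-style stack load faults: %d\n",
           load_from_user_page_faults(&env, true));   /* 1: the bug */
    printf("CPL-3 data stack load faults:   %d\n",
           load_from_user_page_faults(&env, false));  /* 0: the fix */
    return 0;
}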
Analyzed-by: Robert R. Henry <rrh.henry@gmail.com>
Co-developed-by: Robert R. Henry <rrh.henry@gmail.com>
Signed-off-by: Robert R. Henry <rrh.henry@gmail.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'target')
-rw-r--r--  target/i386/tcg/seg_helper.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index 19d6b41..224e73e 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -594,13 +594,13 @@ int exception_has_error_code(int intno)
 
 #define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
     {                                                            \
-        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
+        val = cpu_lduw_data_ra(env, (ssp) + (sp & (sp_mask)), ra); \
         sp += 2;                                                 \
     }
 
 #define POPL_RA(ssp, sp, sp_mask, val, ra)                       \
     {                                                            \
-        val = (uint32_t)cpu_ldl_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
+        val = (uint32_t)cpu_ldl_data_ra(env, (ssp) + (sp & (sp_mask)), ra); \
         sp += 4;                                                 \
     }
 
@@ -847,7 +847,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
 
 #define POPQ_RA(sp, val, ra)                    \
     {                                           \
-        val = cpu_ldq_kernel_ra(env, sp, ra);   \
+        val = cpu_ldq_data_ra(env, sp, ra);     \
         sp += 8;                                \
     }
 
@@ -1797,18 +1797,18 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
             PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
             PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
             for (i = param_count - 1; i >= 0; i--) {
-                val = cpu_ldl_kernel_ra(env, old_ssp +
-                                        ((env->regs[R_ESP] + i * 4) &
-                                         old_sp_mask), GETPC());
+                val = cpu_ldl_data_ra(env,
+                                      old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
+                                      GETPC());
                 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
             }
         } else {
             PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
             PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
             for (i = param_count - 1; i >= 0; i--) {
-                val = cpu_lduw_kernel_ra(env, old_ssp +
-                                         ((env->regs[R_ESP] + i * 2) &
-                                          old_sp_mask), GETPC());
+                val = cpu_lduw_data_ra(env,
+                                       old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
+                                       GETPC());
                 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
             }
         }