path: root/tcg/i386/tcg-target.c.inc
author     Richard Henderson <richard.henderson@linaro.org>  2022-11-07 20:51:56 +1100
committer  Richard Henderson <richard.henderson@linaro.org>  2023-05-16 15:21:39 -0700
commit     30cc7a7e912481b5ba03df370d638f7ff4d7d6e1 (patch)
tree       16952f94f8de5df9a7bd2e2e5f93962f0b7f3b92 /tcg/i386/tcg-target.c.inc
parent     1ce12a8c83f2dcd10aa324916dcf6f67cc58a882 (diff)
tcg/i386: Use full load/store helpers in user-only mode
Instead of using helper_unaligned_{ld,st}, use the full load/store helpers.
This will allow the fast path to increase alignment to implement atomicity
while not immediately raising an alignment exception.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
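The functional core of the change is visible in the two slow-path hunks below: instead of deciding at compile time, via TARGET_LONG_BITS > TCG_TARGET_REG_BITS, whether a second branch displacement needs patching, the code now checks whether a second label pointer was recorded at all. A minimal sketch of that patching shape, using hypothetical stand-in types and a hypothetical patch32() helper rather than QEMU's real tcg_insn_unit/tcg_patch32 definitions:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for QEMU's tcg_insn_unit; not the real definition. */
typedef uint8_t insn_unit;

/* Hypothetical helper standing in for tcg_patch32(): rewrite a 32-bit
 * branch displacement in place. */
static void patch32(insn_unit *loc, ptrdiff_t disp)
{
    int32_t v = disp;
    memcpy(loc, &v, 4);
}

/* After this commit the slow path resolves the second branch only when one
 * was actually emitted (label_ptr[1] != NULL), rather than inferring its
 * presence from TARGET_LONG_BITS > TCG_TARGET_REG_BITS. */
static void resolve_slow_path(insn_unit *code_ptr, insn_unit *label_ptr[2])
{
    patch32(label_ptr[0], code_ptr - label_ptr[0] - 4);
    if (label_ptr[1]) {
        patch32(label_ptr[1], code_ptr - label_ptr[1] - 4);
    }
}
```

Keying the check off the recorded label pointer is what lets user-only mode reuse the same slow-path code as softmmu, so the separate tcg_out_fail_alignment() path under the old #else can be deleted and #ifndef CONFIG_SOFTMMU shrinks to guarding only x86_guest_base.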
Diffstat (limited to 'tcg/i386/tcg-target.c.inc')
-rw-r--r--  tcg/i386/tcg-target.c.inc  52
1 file changed, 4 insertions(+), 48 deletions(-)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 911123c..21553f3 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -1776,7 +1776,6 @@ typedef struct {
int seg;
} HostAddress;
-#if defined(CONFIG_SOFTMMU)
/*
* Because i686 has no register parameters and because x86_64 has xchg
* to handle addr/data register overlap, we have placed all input arguments
@@ -1812,7 +1811,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
/* resolve label address */
tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+ if (label_ptr[1]) {
tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
}
@@ -1834,7 +1833,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
/* resolve label address */
tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+ if (label_ptr[1]) {
tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
}
@@ -1844,51 +1843,8 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_jmp(s, l->raddr);
return true;
}
-#else
-static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
-{
- /* resolve label address */
- tcg_patch32(l->label_ptr[0], s->code_ptr - l->label_ptr[0] - 4);
-
- if (TCG_TARGET_REG_BITS == 32) {
- int ofs = 0;
-
- tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
- ofs += 4;
-
- tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
- ofs += 4;
- if (TARGET_LONG_BITS == 64) {
- tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
- ofs += 4;
- }
-
- tcg_out_pushi(s, (uintptr_t)l->raddr);
- } else {
- tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
- l->addrlo_reg);
- tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
-
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, (uintptr_t)l->raddr);
- tcg_out_push(s, TCG_REG_RAX);
- }
-
- /* "Tail call" to the helper, with the return address back inline. */
- tcg_out_jmp(s, (const void *)(l->is_ld ? helper_unaligned_ld
- : helper_unaligned_st));
- return true;
-}
-
-static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
- return tcg_out_fail_alignment(s, l);
-}
-
-static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
- return tcg_out_fail_alignment(s, l);
-}
+#ifndef CONFIG_SOFTMMU
static HostAddress x86_guest_base = {
.index = -1
};
@@ -1920,7 +1876,7 @@ static inline int setup_guest_base_seg(void)
return 0;
}
#endif /* setup_guest_base_seg */
-#endif /* SOFTMMU */
+#endif /* !SOFTMMU */
/*
* For softmmu, perform the TLB load and compare.