author     Richard Henderson <rth@twiddle.net>    2013-09-04 08:13:42 -0700
committer  Richard Henderson <rth@twiddle.net>    2013-10-12 16:19:19 -0700
commit     7352ee546ce0aba261d0e64595eae6e74e75e49d (patch)
tree       4b585ec3f958066d1cd384f19ff356e0bcedd7dc /tcg/i386/tcg-target.c
parent     37c5d0d5d1ad4e603dc50411c85bad6f726357a2 (diff)
tcg-i386: Tidy softmmu routines

Pass two TCGReg to tcg_out_tlb_load, rather than idx+args.

Move the ldst_optimization routines just below tcg_out_tlb_load
to avoid the need for forward declarations.

Use the TCGReg enum in preference to int where appropriate.

Signed-off-by: Richard Henderson <rth@twiddle.net>
Diffstat (limited to 'tcg/i386/tcg-target.c')
-rw-r--r--  tcg/i386/tcg-target.c | 457
1 file changed, 208 insertions(+), 249 deletions(-)
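
The core of the tidy-up is visible at the call sites: tcg_out_tlb_load now
receives the address registers directly instead of an index into the TCGArg
array. A minimal before/after sketch, condensed from the hunks below (all
names are taken from the patch itself):

    /* Before: the callee dug the registers out of args via addrlo_idx. */
    tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
                     label_ptr, offsetof(CPUTLBEntry, addr_read));

    /* After: the caller decodes args once, then passes TCGReg values. */
    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && opc == 3 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
    mem_index = *args++;
    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
                     label_ptr, offsetof(CPUTLBEntry, addr_read));
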
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index 1b86009..a7ff8a3 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -1043,22 +1043,10 @@ static const void * const qemu_st_helpers[4] = {
helper_ret_stq_mmu,
};
-static void add_qemu_ldst_label(TCGContext *s,
- int is_ld,
- int opc,
- int data_reg,
- int data_reg2,
- int addrlo_reg,
- int addrhi_reg,
- int mem_index,
- uint8_t *raddr,
- uint8_t **label_ptr);
-
/* Perform the TLB load and compare.
Inputs:
- ADDRLO_IDX contains the index into ARGS of the low part of the
- address; the high part of the address is at ADDR_LOW_IDX+1.
+ ADDRLO and ADDRHI contain the low and high part of the address.
MEM_INDEX and S_BITS are the memory context and log2 size of the load.
@@ -1076,14 +1064,12 @@ static void add_qemu_ldst_label(TCGContext *s,
First argument register is clobbered. */
-static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
+static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
int mem_index, TCGMemOp s_bits,
- const TCGArg *args,
uint8_t **label_ptr, int which)
{
- const int addrlo = args[addrlo_idx];
- const int r0 = TCG_REG_L0;
- const int r1 = TCG_REG_L1;
+ const TCGReg r0 = TCG_REG_L0;
+ const TCGReg r1 = TCG_REG_L1;
TCGType ttype = TCG_TYPE_I32;
TCGType htype = TCG_TYPE_I32;
int trexw = 0, hrexw = 0;
@@ -1132,7 +1118,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
/* cmp 4(r0), addrhi */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r0, 4);
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4);
/* jne slow_path */
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@@ -1146,6 +1132,183 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
offsetof(CPUTLBEntry, addend) - which);
}
+
+/*
+ * Record the context of a call to the out of line helper code for the slow path
+ * for a load or store, so that we can later generate the correct helper code
+ */
+static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
+ TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addrhi,
+ int mem_index, uint8_t *raddr,
+ uint8_t **label_ptr)
+{
+ TCGLabelQemuLdst *label = new_ldst_label(s);
+
+ label->is_ld = is_ld;
+ label->opc = opc;
+ label->datalo_reg = datalo;
+ label->datahi_reg = datahi;
+ label->addrlo_reg = addrlo;
+ label->addrhi_reg = addrhi;
+ label->mem_index = mem_index;
+ label->raddr = raddr;
+ label->label_ptr[0] = label_ptr[0];
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+ label->label_ptr[1] = label_ptr[1];
+ }
+}
+
+/*
+ * Generate code for the slow path for a load at the end of block
+ */
+static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ TCGMemOp opc = l->opc;
+ TCGMemOp s_bits = opc & MO_SIZE;
+ TCGReg data_reg;
+ uint8_t **label_ptr = &l->label_ptr[0];
+
+ /* resolve label address */
+ *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+ *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
+ }
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ int ofs = 0;
+
+ tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ if (TARGET_LONG_BITS == 64) {
+ tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+ }
+
+ tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+ ofs += 4;
+
+ tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+ /* The second argument is already loaded with addrlo. */
+ tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+ l->mem_index);
+ tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
+ (uintptr_t)l->raddr);
+ }
+
+ tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[s_bits]);
+
+ data_reg = l->datalo_reg;
+ switch (opc & MO_SSIZE) {
+ case MO_SB:
+ tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
+ break;
+ case MO_SW:
+ tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
+ break;
+#if TCG_TARGET_REG_BITS == 64
+ case MO_SL:
+ tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
+ break;
+#endif
+ case MO_UB:
+ case MO_UW:
+ /* Note that the helpers have zero-extended to tcg_target_long. */
+ case MO_UL:
+ tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
+ break;
+ case MO_Q:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
+ } else if (data_reg == TCG_REG_EDX) {
+ /* xchg %edx, %eax */
+ tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
+ tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
+ tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+ /* Jump to the code corresponding to next IR of qemu_st */
+ tcg_out_jmp(s, (uintptr_t)l->raddr);
+}
+
+/*
+ * Generate code for the slow path for a store at the end of block
+ */
+static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ TCGMemOp opc = l->opc;
+ TCGMemOp s_bits = opc & MO_SIZE;
+ uint8_t **label_ptr = &l->label_ptr[0];
+ TCGReg retaddr;
+
+ /* resolve label address */
+ *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+ *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
+ }
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ int ofs = 0;
+
+ tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ if (TARGET_LONG_BITS == 64) {
+ tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+ }
+
+ tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ if (s_bits == MO_64) {
+ tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+ }
+
+ tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+ ofs += 4;
+
+ retaddr = TCG_REG_EAX;
+ tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
+ tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+ /* The second argument is already loaded with addrlo. */
+ tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+ tcg_target_call_iarg_regs[2], l->datalo_reg);
+ tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+ l->mem_index);
+
+ if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
+ retaddr = tcg_target_call_iarg_regs[4];
+ tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+ } else {
+ retaddr = TCG_REG_RAX;
+ tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+ tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0);
+ }
+ }
+
+ /* "Tail call" to the helper, with the return address back inline. */
+ tcg_out_push(s, retaddr);
+ tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[s_bits]);
+}
#elif defined(__x86_64__) && defined(__linux__)
# include <asm/prctl.h>
# include <sys/prctl.h>
@@ -1250,46 +1413,36 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
common. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGMemOp opc)
{
- int data_reg, data_reg2 = 0;
- int addrlo_idx;
+ TCGReg datalo, datahi, addrlo;
#if defined(CONFIG_SOFTMMU)
+ TCGReg addrhi;
int mem_index;
TCGMemOp s_bits;
uint8_t *label_ptr[2];
#endif
- data_reg = args[0];
- addrlo_idx = 1;
- if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
- data_reg2 = args[1];
- addrlo_idx = 2;
- }
+ datalo = *args++;
+ datahi = (TCG_TARGET_REG_BITS == 32 && opc == 3 ? *args++ : 0);
+ addrlo = *args++;
#if defined(CONFIG_SOFTMMU)
- mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
+ addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
+ mem_index = *args++;
s_bits = opc & MO_SIZE;
- tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
+ tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
label_ptr, offsetof(CPUTLBEntry, addr_read));
/* TLB Hit. */
- tcg_out_qemu_ld_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
+ tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
/* Record the current context of a load into ldst label */
- add_qemu_ldst_label(s,
- 1,
- opc,
- data_reg,
- data_reg2,
- args[addrlo_idx],
- args[addrlo_idx + 1],
- mem_index,
- s->code_ptr,
- label_ptr);
+ add_qemu_ldst_label(s, 1, opc, datalo, datahi, addrlo, addrhi,
+ mem_index, s->code_ptr, label_ptr);
#else
{
int32_t offset = GUEST_BASE;
- int base = args[addrlo_idx];
+ TCGReg base = addrlo;
int seg = 0;
/* ??? We assume all operations have left us with register contents
@@ -1307,7 +1460,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGMemOp opc)
offset = 0;
}
- tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, seg, opc);
+ tcg_out_qemu_ld_direct(s, datalo, datahi, base, offset, seg, opc);
}
#endif
}
@@ -1374,46 +1527,36 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGMemOp opc)
{
- int data_reg, data_reg2 = 0;
- int addrlo_idx;
+ TCGReg datalo, datahi, addrlo;
#if defined(CONFIG_SOFTMMU)
+ TCGReg addrhi;
int mem_index;
TCGMemOp s_bits;
uint8_t *label_ptr[2];
#endif
- data_reg = args[0];
- addrlo_idx = 1;
- if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
- data_reg2 = args[1];
- addrlo_idx = 2;
- }
+ datalo = *args++;
+ datahi = (TCG_TARGET_REG_BITS == 32 && opc == 3 ? *args++ : 0);
+ addrlo = *args++;
#if defined(CONFIG_SOFTMMU)
- mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
+ addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
+ mem_index = *args++;
s_bits = opc & MO_SIZE;
- tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
+ tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
label_ptr, offsetof(CPUTLBEntry, addr_write));
/* TLB Hit. */
- tcg_out_qemu_st_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
+ tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
/* Record the current context of a store into ldst label */
- add_qemu_ldst_label(s,
- 0,
- opc,
- data_reg,
- data_reg2,
- args[addrlo_idx],
- args[addrlo_idx + 1],
- mem_index,
- s->code_ptr,
- label_ptr);
+ add_qemu_ldst_label(s, 0, opc, datalo, datahi, addrlo, addrhi,
+ mem_index, s->code_ptr, label_ptr);
#else
{
int32_t offset = GUEST_BASE;
- int base = args[addrlo_idx];
+ TCGReg base = addrlo;
int seg = 0;
/* ??? We assume all operations have left us with register contents
@@ -1431,194 +1574,10 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGMemOp opc)
offset = 0;
}
- tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, seg, opc);
- }
-#endif
-}
-
-#if defined(CONFIG_SOFTMMU)
-/*
- * Record the context of a call to the out of line helper code for the slow path
- * for a load or store, so that we can later generate the correct helper code
- */
-static void add_qemu_ldst_label(TCGContext *s,
- int is_ld,
- int opc,
- int data_reg,
- int data_reg2,
- int addrlo_reg,
- int addrhi_reg,
- int mem_index,
- uint8_t *raddr,
- uint8_t **label_ptr)
-{
- TCGLabelQemuLdst *label = new_ldst_label(s);
-
- label->is_ld = is_ld;
- label->opc = opc;
- label->datalo_reg = data_reg;
- label->datahi_reg = data_reg2;
- label->addrlo_reg = addrlo_reg;
- label->addrhi_reg = addrhi_reg;
- label->mem_index = mem_index;
- label->raddr = raddr;
- label->label_ptr[0] = label_ptr[0];
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- label->label_ptr[1] = label_ptr[1];
- }
-}
-
-/*
- * Generate code for the slow path for a load at the end of block
- */
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
- TCGMemOp opc = l->opc;
- TCGMemOp s_bits = opc & MO_SIZE;
- TCGReg data_reg;
- uint8_t **label_ptr = &l->label_ptr[0];
-
- /* resolve label address */
- *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
- }
-
- if (TCG_TARGET_REG_BITS == 32) {
- int ofs = 0;
-
- tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
- ofs += 4;
-
- tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
- ofs += 4;
-
- if (TARGET_LONG_BITS == 64) {
- tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
- ofs += 4;
- }
-
- tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
- ofs += 4;
-
- tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
- } else {
- tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
- /* The second argument is already loaded with addrlo. */
- tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
- l->mem_index);
- tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
- (uintptr_t)l->raddr);
+ tcg_out_qemu_st_direct(s, datalo, datahi, base, offset, seg, opc);
}
-
- tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[s_bits]);
-
- data_reg = l->datalo_reg;
- switch (opc & MO_SSIZE) {
- case MO_SB:
- tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
- break;
- case MO_SW:
- tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case MO_SL:
- tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
- break;
#endif
- case MO_UB:
- case MO_UW:
- /* Note that the helpers have zero-extended to tcg_target_long. */
- case MO_UL:
- tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
- break;
- case MO_Q:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
- } else if (data_reg == TCG_REG_EDX) {
- /* xchg %edx, %eax */
- tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
- tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
- } else {
- tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
- tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
- }
- break;
- default:
- tcg_abort();
- }
-
- /* Jump to the code corresponding to next IR of qemu_st */
- tcg_out_jmp(s, (uintptr_t)l->raddr);
-}
-
-/*
- * Generate code for the slow path for a store at the end of block
- */
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
- TCGMemOp opc = l->opc;
- TCGMemOp s_bits = opc & MO_SIZE;
- uint8_t **label_ptr = &l->label_ptr[0];
- TCGReg retaddr;
-
- /* resolve label address */
- *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
- }
-
- if (TCG_TARGET_REG_BITS == 32) {
- int ofs = 0;
-
- tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
- ofs += 4;
-
- tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
- ofs += 4;
-
- if (TARGET_LONG_BITS == 64) {
- tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
- ofs += 4;
- }
-
- tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
- ofs += 4;
-
- if (s_bits == MO_64) {
- tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
- ofs += 4;
- }
-
- tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
- ofs += 4;
-
- retaddr = TCG_REG_EAX;
- tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
- tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
- } else {
- tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
- /* The second argument is already loaded with addrlo. */
- tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
- tcg_target_call_iarg_regs[2], l->datalo_reg);
- tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
- l->mem_index);
-
- if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
- retaddr = tcg_target_call_iarg_regs[4];
- tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
- } else {
- retaddr = TCG_REG_RAX;
- tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
- tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0);
- }
- }
-
- /* "Tail call" to the helper, with the return address back inline. */
- tcg_out_push(s, retaddr);
- tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[s_bits]);
}
-#endif /* CONFIG_SOFTMMU */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg *args, const int *const_args)
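
A note on the back-patching used by both slow-path generators above:
tcg_out_tlb_load emits a forward "jne slow_path" whose rel32 displacement is
left as a placeholder, and label_ptr records the address of that 4-byte
field. When the slow path is later emitted at s->code_ptr, the displacement
is filled in relative to the first byte after the jump instruction, which is
where the -4 comes from. A commented sketch of the line from the patch:

    /* x86 rel32 jumps are relative to the end of the displacement field,
       i.e. label_ptr[0] + 4, so subtract 4 from the raw byte distance.  */
    *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);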