 tcg/aarch64/tcg-target.c.inc     |  3 +--
 tcg/i386/tcg-target.c.inc        |  3 +--
 tcg/loongarch64/tcg-target.c.inc |  3 +--
 tcg/ppc/tcg-target.c.inc         |  7 +++----
 tcg/s390x/tcg-target.c.inc       |  2 +-
 tcg/sparc64/tcg-target.c.inc     |  5 ++---
 tcg/tcg.c                        | 10 ++++++++++
 7 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 90af096..59e6a08 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1918,7 +1918,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /*
          * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
          * write can be used to patch the target address.
@@ -1926,7 +1925,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out32(s, NOP);
         }
-        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        set_jmp_insn_offset(s, a0);
         /*
          * actual branch destination will be patched by
          * tb_target_set_jmp_target later
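
The comment above is the point of the alignment dance: an ADRP+ADD pair that sits on an 8-byte boundary can later be retargeted with a single atomic 64-bit store. A minimal sketch of that idea, assuming a little-endian host and a hypothetical patch_insn_pair helper (QEMU's real logic lives in tb_target_set_jmp_target and still needs instruction-cache maintenance afterwards); the same trick motivates the loongarch64 hunk further down:

#include <stdint.h>

static void patch_insn_pair(uint32_t *pair, uint32_t i1, uint32_t i2)
{
    /* Little-endian: the first instruction occupies the low word. */
    uint64_t both = ((uint64_t)i2 << 32) | i1;

    /*
     * Relies on the 8-byte alignment established by the NOP padding:
     * a racing CPU sees either the old pair or the new pair, never a
     * torn mix of the two.
     */
    __atomic_store_n((uint64_t *)pair, both, __ATOMIC_RELAXED);
}
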
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index c4ff59e..6fb40fe 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -2383,7 +2383,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         {
             /*
              * Jump displacement must be aligned for atomic patching;
@@ -2394,7 +2393,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                 tcg_out_nopn(s, gap - 1);
             }
             tcg_out8(s, OPC_JMP_long); /* jmp im */
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, a0);
             tcg_out32(s, 0);
         }
         set_jmp_reset_offset(s, a0);
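
On x86 the recorded offset points at the 4-byte displacement that follows the 0xE9 opcode byte emitted above. A sketch of the patching this enables, with a hypothetical patch_jmp_disp helper rather than QEMU's actual code: disp_rw is the writable view of the displacement and disp_rx its address in the executable mapping.

#include <stdint.h>

static void patch_jmp_disp(uint32_t *disp_rw, uintptr_t disp_rx,
                           uintptr_t target)
{
    /* rel32 is measured from the end of the 5-byte jmp instruction. */
    uint32_t rel = (uint32_t)(target - (disp_rx + 4));

    /*
     * A naturally aligned 4-byte store is atomic on x86; that is
     * exactly what the nop padding above buys us.
     */
    __atomic_store_n(disp_rw, rel, __ATOMIC_RELAXED);
}
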
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index 5dd645f..bce7340 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -1090,7 +1090,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /*
          * Ensure that patch area is 8-byte aligned so that an
          * atomic write can be used to patch the target address.
@@ -1098,7 +1097,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out_nop(s);
         }
-        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        set_jmp_insn_offset(s, a0);
         /*
          * actual branch destination will be patched by
          * tb_target_set_jmp_target later
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index b72e266..dbe8ccd 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -2630,20 +2630,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /* Direct jump. */
         if (TCG_TARGET_REG_BITS == 64) {
             /* Ensure the next insns are 8 or 16-byte aligned. */
             while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
                 tcg_out32(s, NOP);
             }
-            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, args[0]);
             tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
             tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
         } else {
-            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, args[0]);
             tcg_out32(s, B);
-            s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_reset_offset(s, args[0]);
             break;
         }
         tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
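
In the 64-bit path above, the ADDIS/ADDI pair (emitted with zero immediates, to be filled in when the jump is patched) materializes a 32-bit displacement into TCG_REG_TB before the mtctr/bctr that follows. A sketch of how such a displacement splits into the two immediate fields, using a hypothetical split_addis_addi helper: the subtlety is that ADDI sign-extends its 16-bit operand, so the high half must absorb the resulting carry.

#include <stdint.h>

static void split_addis_addi(int32_t disp, int16_t *hi, int16_t *lo)
{
    *lo = (int16_t)disp;                          /* ADDI sign-extends this */
    *hi = (int16_t)(((int64_t)disp - *lo) >> 16); /* compensate the carry */
}

For example, disp = 0x12348000 yields lo = -0x8000 and hi = 0x1235, since (0x1235 << 16) - 0x8000 == 0x12348000.
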
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index 48a0c3e..c234347 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -1977,7 +1977,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
             tcg_out16(s, NOP);
         }
         tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
-        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        set_jmp_insn_offset(s, a0);
         s->code_ptr += 2;
         set_jmp_reset_offset(s, a0);
         break;
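
The two halfwords skipped above with "s->code_ptr += 2" are BRCL's 32-bit relative-offset field, which set_jmp_insn_offset has just recorded. A sketch of how that field would be computed, with a hypothetical brcl_offset helper: RIL-format branches on s390x encode the target as a signed count of halfwords from the start of the instruction.

#include <stdint.h>

static int32_t brcl_offset(uintptr_t insn_start, uintptr_t target)
{
    intptr_t bytes = (intptr_t)(target - insn_start);

    /* Branch targets are halfword-aligned on s390x, so bytes is even. */
    return (int32_t)(bytes >> 1);
}
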
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index 26b00d1..c3109fe 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -1452,20 +1452,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /* Direct jump. */
         if (USE_REG_TB) {
            /* make sure the patch is 8-byte aligned. */
            if ((intptr_t)s->code_ptr & 4) {
                tcg_out_nop(s);
            }
-           s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+           set_jmp_insn_offset(s, a0);
            tcg_out_sethi(s, TCG_REG_T1, 0);
            tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
            tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
            tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
         } else {
-           s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+           set_jmp_insn_offset(s, a0);
            tcg_out32(s, CALL);
            tcg_out_nop(s);
         }
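
The USE_REG_TB path above emits a SETHI/OR pair (again with zero immediates) that the 8-byte-aligned patch later rewrites. A sketch of the immediate split involved, using a hypothetical split_sethi_or helper: SETHI loads a 22-bit immediate into bits 31..10 and OR supplies the low 10 bits, which is why both instructions must be repatched together, hence the alignment check.

#include <stdint.h>

static void split_sethi_or(uint32_t value, uint32_t *hi22, uint32_t *lo10)
{
    *hi22 = value >> 10;    /* immediate field of SETHI %hi(value) */
    *lo10 = value & 0x3ff;  /* immediate field of OR ..., %lo(value) */
}
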
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 2574793..4092dac 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -313,6 +313,16 @@ static void set_jmp_reset_offset(TCGContext *s, int which)
     s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
 }
 
+static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
+{
+    /*
+     * We will check for overflow at the end of the opcode loop in
+     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
+     */
+    tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
+    s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
+}
+
 /* Signal overflow, starting over with fewer guest insns. */
 static G_NORETURN
 void tcg_raise_tb_overflow(TCGContext *s)
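
The comment inside the new helper explains why it may store without checking: overflow is caught once per opcode rather than at every store. A simplified sketch of that bound, with a hypothetical check_tb_code_size helper standing in for the test at the end of the opcode loop in tcg_gen_code: because fields such as tb_jmp_insn_offset are uint16_t, a TB whose generated code grows past UINT16_MAX bytes cannot be represented and must be retranslated with fewer guest instructions.

#include <stdint.h>
#include <stddef.h>

static int check_tb_code_size(size_t current_code_size)
{
    /* Negative return signals the caller to restart with a smaller TB. */
    return current_code_size > UINT16_MAX ? -2 : 0;
}
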