Diffstat (limited to 'tcg/aarch64/tcg-target.inc.c')
-rw-r--r--  tcg/aarch64/tcg-target.inc.c  78
1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index 04bc369..c2f3812 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -10,7 +10,7 @@
* See the COPYING file in the top-level directory for details.
*/
-#include "tcg-be-ldst.h"
+#include "tcg-pool.inc.c"
#include "qemu/bitops.h"
/* We're going to re-use TCGType in setting of the SF bit, which controls
@@ -588,9 +588,11 @@ static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext,
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
tcg_target_long value)
{
- int i, wantinv, shift;
tcg_target_long svalue = value;
tcg_target_long ivalue = ~value;
+ tcg_target_long t0, t1, t2;
+ int s0, s1;
+ AArch64Insn opc;
/* For 32-bit values, discard potential garbage in value. For 64-bit
values within [2**31, 2**32-1], we can create smaller sequences by
@@ -639,38 +641,29 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
}
}
- /* Would it take fewer insns to begin with MOVN? For the value and its
- inverse, count the number of 16-bit lanes that are 0. */
- for (i = wantinv = 0; i < 64; i += 16) {
- tcg_target_long mask = 0xffffull << i;
- wantinv -= ((value & mask) == 0);
- wantinv += ((ivalue & mask) == 0);
- }
-
- if (wantinv <= 0) {
- /* Find the lowest lane that is not 0x0000. */
- shift = ctz64(value) & (63 & -16);
- tcg_out_insn(s, 3405, MOVZ, type, rd, value >> shift, shift);
- /* Clear out the lane that we just set. */
- value &= ~(0xffffUL << shift);
- /* Iterate until all non-zero lanes have been processed. */
- while (value) {
- shift = ctz64(value) & (63 & -16);
- tcg_out_insn(s, 3405, MOVK, type, rd, value >> shift, shift);
- value &= ~(0xffffUL << shift);
- }
+ /* Would it take fewer insns to begin with MOVN? */
+ if (ctpop64(value) >= 32) {
+ t0 = ivalue;
+ opc = I3405_MOVN;
} else {
- /* Like above, but with the inverted value and MOVN to start. */
- shift = ctz64(ivalue) & (63 & -16);
- tcg_out_insn(s, 3405, MOVN, type, rd, ivalue >> shift, shift);
- ivalue &= ~(0xffffUL << shift);
- while (ivalue) {
- shift = ctz64(ivalue) & (63 & -16);
- /* Provide MOVK with the non-inverted value. */
- tcg_out_insn(s, 3405, MOVK, type, rd, ~(ivalue >> shift), shift);
- ivalue &= ~(0xffffUL << shift);
+ t0 = value;
+ opc = I3405_MOVZ;
+ }
+ s0 = ctz64(t0) & (63 & -16);
+ t1 = t0 & ~(0xffffUL << s0);
+ s1 = ctz64(t1) & (63 & -16);
+ t2 = t1 & ~(0xffffUL << s1);
+ if (t2 == 0) {
+ tcg_out_insn_3405(s, opc, type, rd, t0 >> s0, s0);
+ if (t1 != 0) {
+ tcg_out_insn(s, 3405, MOVK, type, rd, value >> s1, s1);
}
+ return;
}
+
+ /* For more than 2 insns, dump it into the constant pool. */
+ new_pool_label(s, value, R_AARCH64_CONDBR19, s->code_ptr, 0);
+ tcg_out_insn(s, 3305, LDR, 0, rd);
}
/* Define something more legible for general use. */
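[The rewritten tcg_out_movi emits at most a two-instruction MOVZ/MOVN + MOVK sequence and otherwise falls back to a constant-pool load. The heuristic: when at least half the bits are set (ctpop64(value) >= 32), the inverted value has more zero 16-bit lanes, so the sequence starts from MOVN; ctz64 masked with (63 & -16) then rounds the position of each non-zero lane down to a multiple of 16. A standalone sketch of that lane arithmetic, illustrative only and not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Worked example of the lane selection in tcg_out_movi above.
   For value = 0x0000123400005678 (13 set bits, so start from MOVZ):
   s0 = 0, t1 = 0x0000123400000000, s1 = 32, t2 = 0, i.e. the value
   fits in MOVZ rd,#0x5678 ; MOVK rd,#0x1234,lsl #32.
   QEMU's ctz64(0) returns 64, which the mask folds to 0; the raw
   builtin is undefined for 0, hence the guard on t1 here.  */
int main(void)
{
    uint64_t value = 0x0000123400005678ull;
    uint64_t t0 = value;                      /* ctpop64(value) < 32 */
    int s0 = __builtin_ctzll(t0) & (63 & -16);
    uint64_t t1 = t0 & ~(0xffffull << s0);
    int s1 = t1 ? __builtin_ctzll(t1) & (63 & -16) : 0;
    uint64_t t2 = t1 & ~(0xffffull << s1);
    printf("s0=%d s1=%d t2=%#llx -> %s\n", s0, s1,
           (unsigned long long)t2, t2 ? "constant pool" : "<= 2 insns");
    return 0;
}]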
@@ -871,9 +864,8 @@ static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
}
}
-#ifdef USE_DIRECT_JUMP
-
-void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
+ uintptr_t addr)
{
tcg_insn_unit i1, i2;
TCGType rt = TCG_TYPE_I64;
@@ -898,8 +890,6 @@ void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
flush_icache_range(jmp_addr, jmp_addr + 8);
}
-#endif
-
static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
{
if (!l->has_value) {
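[The body of tb_target_set_jmp_target (elided between the two hunks above) builds either a B+NOP or an ADRP+ADD pair and installs it over the 8-byte-aligned jump site with a single atomic 64-bit store, so another CPU executing the TB never fetches a half-patched pair; flush_icache_range then publishes it. A minimal sketch of that store, assuming little-endian instruction order and GCC/Clang atomics (patch_pair is my name, not QEMU's):

#include <stdint.h>

/* Overwrite two adjacent A64 instructions in one go.  jmp_addr must be
   8-byte aligned; i1 is the instruction that executes first.  The
   caller still owns the icache flush for [jmp_addr, jmp_addr + 8).  */
static void patch_pair(uintptr_t jmp_addr, uint32_t i1, uint32_t i2)
{
    uint64_t pair = ((uint64_t)i2 << 32) | i1;
    __atomic_store_n((uint64_t *)jmp_addr, pair, __ATOMIC_SEQ_CST);
}]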
@@ -1073,6 +1063,8 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
}
#ifdef CONFIG_SOFTMMU
+#include "tcg-ldst.inc.c"
+
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* TCGMemOpIdx oi, uintptr_t ra)
*/
@@ -1412,7 +1404,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_goto_tb:
if (s->tb_jmp_insn_offset != NULL) {
- /* USE_DIRECT_JUMP */
+ /* TCG_TARGET_HAS_direct_jump */
/* Ensure that ADRP+ADD are 8-byte aligned so that an atomic
write can be used to patch the target address. */
if ((uintptr_t)s->code_ptr & 7) {
@@ -1420,11 +1412,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
/* actual branch destination will be patched by
- aarch64_tb_set_jmp_target later. */
+ tb_target_set_jmp_target later. */
tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
} else {
- /* !USE_DIRECT_JUMP */
+ /* !TCG_TARGET_HAS_direct_jump */
tcg_debug_assert(s->tb_jmp_target_addr != NULL);
intptr_t offset = tcg_pcrel_diff(s, (s->tb_jmp_target_addr + a0)) >> 2;
tcg_out_insn(s, 3305, LDR, offset, TCG_REG_TMP);
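[In the direct-jump case, the ADRP+ADD pair can materialize any destination within +/-4 GiB of the code buffer: ADRP yields the target's 4 KiB page relative to the PC, and ADD supplies the low 12 bits. A sketch of that immediate split (adrp_add_imms is my helper, not QEMU's encoder):

#include <stdint.h>

/* Split a target address into the two immediates an ADRP+ADD pair
   needs: the signed page delta for ADRP, the low 12 bits for ADD.  */
static void adrp_add_imms(uint64_t pc, uint64_t addr,
                          int64_t *page_delta, uint32_t *lo12)
{
    *page_delta = (int64_t)(addr >> 12) - (int64_t)(pc >> 12);
    *lo12 = addr & 0xfff;
}]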
@@ -2032,6 +2024,14 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_insn(s, 3207, RET, TCG_REG_LR);
}
+static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
+{
+ int i;
+ for (i = 0; i < count; ++i) {
+ p[i] = NOP;
+ }
+}
+
typedef struct {
DebugFrameHeader h;
uint8_t fde_def_cfa[4];
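[tcg_out_nop_fill above is the hook the constant-pool machinery uses to pad from the last generated instruction up to the pool's alignment boundary; on AArch64 the NOP it stores is the fixed encoding 0xd503201f (HINT #0). A usage sketch under that assumption (pad_to_8 is my name):

#include <stdint.h>

#define A64_NOP 0xd503201fu   /* HINT #0 */

/* Pad a buffer of 32-bit insn units up to an 8-byte boundary.  */
static uint32_t *pad_to_8(uint32_t *p)
{
    while ((uintptr_t)p & 7) {
        *p++ = A64_NOP;
    }
    return p;
}]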