From 120c1084ed53ba2967ba2a6e751cdfb00a17c246 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Fri, 17 Jun 2016 17:02:20 -0700
Subject: tcg: Fix name for high-half register

Reviewed-by: David Gibson
Signed-off-by: Richard Henderson
---
 tcg/tcg.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'tcg/tcg.c')

diff --git a/tcg/tcg.c b/tcg/tcg.c
index 254427b..154ffe8 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -557,7 +557,7 @@ int tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
         ts2->mem_offset = offset + (1 - bigendian) * 4;
         pstrcpy(buf, sizeof(buf), name);
         pstrcat(buf, sizeof(buf), "_1");
-        ts->name = strdup(buf);
+        ts2->name = strdup(buf);
     } else {
         ts->base_type = type;
         ts->type = type;
-- 
cgit v1.1


From 59d7c14eeff8d2ad7f61aed86ce5a176113bc153 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 19 Jun 2016 22:59:13 -0700
Subject: tcg: Optimize spills of constants

While we can store constants via constraints on INDEX_op_st_i32 et al,
we weren't able to spill constants to backing store.

Add a new backend interface, tcg_out_sti, which may store the constant
(and is allowed to fail). Rearrange the temp_* helpers so that we only
attempt to directly store a constant when the temp is becoming dead/free.

Signed-off-by: Richard Henderson
---
 tcg/tcg.c | 165 ++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 91 insertions(+), 74 deletions(-)

(limited to 'tcg/tcg.c')

diff --git a/tcg/tcg.c b/tcg/tcg.c
index 154ffe8..ef8a855 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -108,6 +108,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                        const int *const_args);
 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                        intptr_t arg2);
+static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                        TCGReg base, intptr_t ofs);
 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
 static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct);
@@ -1680,35 +1682,89 @@ static void temp_allocate_frame(TCGContext *s, int temp)
 
 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);
 
-/* sync register 'reg' by saving it to the corresponding temporary */
-static void tcg_reg_sync(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
+/* Mark a temporary as free or dead. If 'free_or_dead' is negative,
+   mark it free; otherwise mark it dead. */
+static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
 {
-    TCGTemp *ts = s->reg_to_temp[reg];
+    if (ts->fixed_reg) {
+        return;
+    }
+    if (ts->val_type == TEMP_VAL_REG) {
+        s->reg_to_temp[ts->reg] = NULL;
+    }
+    ts->val_type = (free_or_dead < 0
+                    || ts->temp_local
+                    || temp_idx(s, ts) < s->nb_globals
+                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
+}
 
-    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
-    if (!ts->mem_coherent && !ts->fixed_reg) {
+/* Mark a temporary as dead. */
+static inline void temp_dead(TCGContext *s, TCGTemp *ts)
+{
+    temp_free_or_dead(s, ts, 1);
+}
+
+/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
+   registers needs to be allocated to store a constant. If 'free_or_dead'
+   is non-zero, subsequently release the temporary; if it is positive, the
+   temp is dead; if it is negative, the temp is free. */
+static void temp_sync(TCGContext *s, TCGTemp *ts,
+                      TCGRegSet allocated_regs, int free_or_dead)
+{
+    if (ts->fixed_reg) {
+        return;
+    }
+    if (!ts->mem_coherent) {
         if (!ts->mem_allocated) {
             temp_allocate_frame(s, temp_idx(s, ts));
-        } else if (ts->indirect_reg) {
-            tcg_regset_set_reg(allocated_regs, ts->reg);
+        }
+        if (ts->indirect_reg) {
+            if (ts->val_type == TEMP_VAL_REG) {
+                tcg_regset_set_reg(allocated_regs, ts->reg);
+            }
             temp_load(s, ts->mem_base,
                       tcg_target_available_regs[TCG_TYPE_PTR],
                       allocated_regs);
         }
-        tcg_out_st(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
+        switch (ts->val_type) {
+        case TEMP_VAL_CONST:
+            /* If we're going to free the temp immediately, then we won't
+               require it later in a register, so attempt to store the
+               constant to memory directly. */
+            if (free_or_dead
+                && tcg_out_sti(s, ts->type, ts->val,
+                               ts->mem_base->reg, ts->mem_offset)) {
+                break;
+            }
+            temp_load(s, ts, tcg_target_available_regs[ts->type],
+                      allocated_regs);
+            /* fallthrough */
+
+        case TEMP_VAL_REG:
+            tcg_out_st(s, ts->type, ts->reg,
+                       ts->mem_base->reg, ts->mem_offset);
+            break;
+
+        case TEMP_VAL_MEM:
+            break;
+
+        case TEMP_VAL_DEAD:
+        default:
+            tcg_abort();
+        }
+        ts->mem_coherent = 1;
+    }
+    if (free_or_dead) {
+        temp_free_or_dead(s, ts, free_or_dead);
     }
-    ts->mem_coherent = 1;
 }
 
 /* free register 'reg' by spilling the corresponding temporary if necessary */
 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
 {
     TCGTemp *ts = s->reg_to_temp[reg];
-
     if (ts != NULL) {
-        tcg_reg_sync(s, reg, allocated_regs);
-        ts->val_type = TEMP_VAL_MEM;
-        s->reg_to_temp[reg] = NULL;
+        temp_sync(s, ts, allocated_regs, -1);
     }
 }
 
@@ -1778,45 +1834,9 @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
     s->reg_to_temp[reg] = ts;
 }
 
-/* mark a temporary as dead. */
-static inline void temp_dead(TCGContext *s, TCGTemp *ts)
-{
-    if (ts->fixed_reg) {
-        return;
-    }
-    if (ts->val_type == TEMP_VAL_REG) {
-        s->reg_to_temp[ts->reg] = NULL;
-    }
-    ts->val_type = (temp_idx(s, ts) < s->nb_globals || ts->temp_local
-                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
-}
-
-/* sync a temporary to memory. 'allocated_regs' is used in case a
-   temporary registers needs to be allocated to store a constant. */
-static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
-{
-    if (ts->fixed_reg) {
-        return;
-    }
-    switch (ts->val_type) {
-    case TEMP_VAL_CONST:
-        temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs);
-        /* fallthrough */
-    case TEMP_VAL_REG:
-        tcg_reg_sync(s, ts->reg, allocated_regs);
-        break;
-    case TEMP_VAL_DEAD:
-    case TEMP_VAL_MEM:
-        break;
-    default:
-        tcg_abort();
-    }
-}
-
-/* save a temporary to memory. 'allocated_regs' is used in case a
-   temporary registers needs to be allocated to store a constant. */
-static inline void temp_save(TCGContext *s, TCGTemp *ts,
-                             TCGRegSet allocated_regs)
+/* Save a temporary to memory. 'allocated_regs' is used in case a
+   temporary registers needs to be allocated to store a constant. */
+static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
 {
 #ifdef USE_LIVENESS_ANALYSIS
     /* ??? Liveness does not yet incorporate indirect bases. */
@@ -1827,8 +1847,7 @@ static inline void temp_save(TCGContext *s, TCGTemp *ts,
         return;
     }
 #endif
-    temp_sync(s, ts, allocated_regs);
-    temp_dead(s, ts);
+    temp_sync(s, ts, allocated_regs, 1);
 }
 
 /* save globals to their canonical location and assume they can be
@@ -1861,7 +1880,7 @@ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
             continue;
         }
 #endif
-        temp_sync(s, ts, allocated_regs);
+        temp_sync(s, ts, allocated_regs, 0);
     }
 }
 
@@ -1905,21 +1924,21 @@ static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
     val = args[1];
 
     if (ots->fixed_reg) {
-        /* for fixed registers, we do not do any constant
-           propagation */
+        /* For fixed registers, we do not do any constant propagation. */
         tcg_out_movi(s, ots->type, ots->reg, val);
-    } else {
-        /* The movi is not explicitly generated here */
-        if (ots->val_type == TEMP_VAL_REG) {
-            s->reg_to_temp[ots->reg] = NULL;
-        }
-        ots->val_type = TEMP_VAL_CONST;
-        ots->val = val;
+        return;
     }
-    if (NEED_SYNC_ARG(0)) {
-        temp_sync(s, ots, s->reserved_regs);
+
+    /* The movi is not explicitly generated here. */
+    if (ots->val_type == TEMP_VAL_REG) {
+        s->reg_to_temp[ots->reg] = NULL;
     }
-    if (IS_DEAD_ARG(0)) {
+    ots->val_type = TEMP_VAL_CONST;
+    ots->val = val;
+    ots->mem_coherent = 0;
+    if (NEED_SYNC_ARG(0)) {
+        temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
+    } else if (IS_DEAD_ARG(0)) {
         temp_dead(s, ots);
     }
 }
@@ -2004,7 +2023,7 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
         ots->mem_coherent = 0;
         s->reg_to_temp[ots->reg] = ots;
         if (NEED_SYNC_ARG(0)) {
-            tcg_reg_sync(s, ots->reg, allocated_regs);
+            temp_sync(s, ots, allocated_regs, 0);
         }
     }
 }
@@ -2163,9 +2182,8 @@ static void tcg_reg_alloc_op(TCGContext *s,
             tcg_out_mov(s, ts->type, ts->reg, reg);
         }
         if (NEED_SYNC_ARG(i)) {
-            tcg_reg_sync(s, reg, allocated_regs);
-        }
-        if (IS_DEAD_ARG(i)) {
+            temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
+        } else if (IS_DEAD_ARG(i)) {
             temp_dead(s, ts);
         }
     }
@@ -2298,9 +2316,8 @@ static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
         ts->mem_coherent = 0;
         s->reg_to_temp[reg] = ts;
         if (NEED_SYNC_ARG(i)) {
-            tcg_reg_sync(s, reg, allocated_regs);
-        }
-        if (IS_DEAD_ARG(i)) {
+            temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
+        } else if (IS_DEAD_ARG(i)) {
             temp_dead(s, ts);
         }
     }
-- 
cgit v1.1


From 1f00b27f17518a1bcb4cedca49eaec96a4d560bd Mon Sep 17 00:00:00 2001
From: Sergey Sorokin
Date: Thu, 23 Jun 2016 21:16:46 +0300
Subject: tcg: Improve the alignment check infrastructure

Some architectures (e.g. ARMv8) need addresses to be aligned to a size
that is larger than the size of the memory access itself. QEMU's current
low-cost alignment check implementation is sufficient to support such a
check, but we need a way to specify the required alignment size.

Signed-off-by: Sergey Sorokin
Message-Id: <1466705806-679898-1-git-send-email-afarallax@yandex.ru>
Signed-off-by: Richard Henderson
[rth: Assert in tcg_canonicalize_memop. Leave get_alignment_bits
available for, though unused by, user-mode. Retain logging difference
based on ALIGNED_ONLY.]
---
 tcg/tcg.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

(limited to 'tcg/tcg.c')

diff --git a/tcg/tcg.c b/tcg/tcg.c
index ef8a855..0c46c43 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -999,6 +999,22 @@ static const char * const ldst_name[] =
     [MO_BEQ] = "beq",
 };
 
+static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
+#ifdef ALIGNED_ONLY
+    [MO_UNALN >> MO_ASHIFT] = "un+",
+    [MO_ALIGN >> MO_ASHIFT] = "",
+#else
+    [MO_UNALN >> MO_ASHIFT] = "",
+    [MO_ALIGN >> MO_ASHIFT] = "al+",
+#endif
+    [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
+    [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
+    [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
+    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
+    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
+    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
+};
+
 void tcg_dump_ops(TCGContext *s)
 {
     char buf[128];
@@ -1100,14 +1116,8 @@ void tcg_dump_ops(TCGContext *s)
                     if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                         qemu_log(",$0x%x,%u", op, ix);
                     } else {
-                        const char *s_al = "", *s_op;
-                        if (op & MO_AMASK) {
-                            if ((op & MO_AMASK) == MO_ALIGN) {
-                                s_al = "al+";
-                            } else {
-                                s_al = "un+";
-                            }
-                        }
+                        const char *s_al, *s_op;
+                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                         s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                         qemu_log(",%s%s,%u", s_al, s_op, ix);
                     }
-- 
cgit v1.1
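
A note on the backend half of "tcg: Optimize spills of constants": the tcg.c hunk above only declares tcg_out_sti and teaches temp_sync to try it. Each TCG backend supplies its own definition, and may return false to decline, in which case the allocator falls back to loading the constant into a register and storing it with tcg_out_st as before. The code below is a sketch of what such an implementation can look like on an x86-style host; the emit helpers it relies on (tcg_out_modrm_offset, tcg_out32, OPC_MOVL_EvIz, P_REXW) belong to the i386 backend and are not part of the patch shown here, so treat the details as an assumption rather than the backend change itself.

/* Sketch, not part of this patch: a possible tcg_out_sti for an x86-like
   backend. Return false when the constant cannot be encoded directly, so
   the register allocator falls back to temp_load + tcg_out_st. */
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
        /* A 64-bit store only accepts a sign-extended 32-bit immediate. */
        if (val != (int32_t)val) {
            return false;
        }
        rexw = P_REXW;
    }
    /* movl/movq $val, ofs(base) */
    tcg_out_modrm_offset(s, OPC_MOVL_EvIz | rexw, 0, base, ofs);
    tcg_out32(s, val);
    return true;
}

As the comment in temp_sync explains, this path is only attempted when 'free_or_dead' is non-zero: if the constant will still be wanted in a register afterwards, storing it directly would not save the later load, so the existing load-then-store path is kept.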
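
A note on "tcg: Improve the alignment check infrastructure": the tcg.c hunk above covers only the opcode-dump side, adding the alignment_name table so that qemu_log prints prefixes such as "al16+". The substantive change of the series is that the MO_AMASK field of a TCGMemOp can now carry a specific alignment (MO_ALIGN_2 through MO_ALIGN_64, encoded as a log2 value shifted by MO_ASHIFT) rather than a single aligned/unaligned flag. The helper below sketches how that field decodes; the real accessor added by the series lives in tcg/tcg.h rather than in this hunk, so the exact details here are illustrative.

/* Sketch of decoding the MO_AMASK field introduced by this series; the
   actual helper (get_alignment_bits in tcg/tcg.h) is not shown in the
   tcg.c hunk above. The result is log2 of the required alignment in
   bytes. */
static inline unsigned get_alignment_bits(TCGMemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required. */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* Natural alignment: align to the access size itself. */
        a = memop & MO_SIZE;
    } else {
        /* Specific alignment, e.g. MO_ALIGN_16 decodes to 4 (2^4 = 16). */
        a = a >> MO_ASHIFT;
    }
    return a;
}

For example, a 32-bit little-endian load that must sit on a 16-byte boundary would be expressed as MO_LEUL | MO_ALIGN_16, and tcg_dump_ops would render it with the "al16+" prefix from the table above.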