author    Richard Henderson <rth@twiddle.net>  2013-08-14 19:32:56 -0700
committer Richard Henderson <rth@redhat.com>   2014-04-16 12:12:58 -0400
commit    dfeb5fe7700d5e29a276f571aee7f6fc4267ee96 (patch)
tree      5cda104066bb7bb484c32774a6637abaf786b143 /tcg/aarch64
parent    929f8b55509fb92320e085504a8492b1908b8110 (diff)
tcg-aarch64: Use MOVN in tcg_out_movi
When profitable, initialize the register with MOVN instead of MOVZ, before setting the remaining lanes with MOVK.

Reviewed-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Richard Henderson <rth@twiddle.net>
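An illustrative case (not from the original commit message): loading the 64-bit constant 0xfffffffffffff123 previously took four instructions, a MOVZ of 0xf123 plus MOVKs of 0xffff at shifts 16, 32 and 48; its bitwise inverse is 0x0edc, so a single MOVN now produces the same value.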
Diffstat (limited to 'tcg/aarch64')
-rw-r--r--  tcg/aarch64/tcg-target.c  63
1 file changed, 50 insertions(+), 13 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 5e6d10b..1d7612c 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -531,24 +531,61 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
tcg_target_long value)
{
AArch64Insn insn;
-
- if (type == TCG_TYPE_I32) {
+ int i, wantinv, shift;
+ tcg_target_long svalue = value;
+ tcg_target_long ivalue = ~value;
+ tcg_target_long imask;
+
+ /* For 32-bit values, discard potential garbage in value. For 64-bit
+ values within [2**31, 2**32-1], we can create smaller sequences by
+ interpreting this as a negative 32-bit number, while ensuring that
+ the high 32 bits are cleared by setting SF=0. */
+ if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) {
+ svalue = (int32_t)value;
value = (uint32_t)value;
+ ivalue = (uint32_t)ivalue;
+ type = TCG_TYPE_I32;
+ }
+
+ /* Would it take fewer insns to begin with MOVN? For the value and its
+ inverse, count the number of 16-bit lanes that are 0. */
+ for (i = wantinv = imask = 0; i < 64; i += 16) {
+ tcg_target_long mask = 0xffffull << i;
+ if ((value & mask) == 0) {
+ wantinv -= 1;
+ }
+ if ((ivalue & mask) == 0) {
+ wantinv += 1;
+ imask |= mask;
+ }
}
- /* count trailing zeros in 16 bit steps, mapping 64 to 0. Emit the
- first MOVZ with the half-word immediate skipping the zeros, with a shift
- (LSL) equal to this number. Then all next instructions use MOVKs.
- Zero the processed half-word in the value, continue until empty.
- We build the final result 16bits at a time with up to 4 instructions,
- but do not emit instructions for 16bit zero holes. */
+ /* If we had more 0xffff than 0x0000, invert VALUE and use MOVN. */
insn = I3405_MOVZ;
- do {
- unsigned shift = ctz64(value) & (63 & -16);
- tcg_out_insn_3405(s, insn, shift >= 32, rd, value >> shift, shift);
+ if (wantinv > 0) {
+ value = ivalue;
+ insn = I3405_MOVN;
+ }
+
+ /* Find the lowest lane that is not 0x0000. */
+ shift = ctz64(value) & (63 & -16);
+ tcg_out_insn_3405(s, insn, type, rd, value >> shift, shift);
+
+ if (wantinv > 0) {
+ /* Re-invert the value, so MOVK sees non-inverted bits. */
+ value = ~value;
+ /* Clear out all the 0xffff lanes. */
+ value ^= imask;
+ }
+ /* Clear out the lane that we just set. */
+ value &= ~(0xffffUL << shift);
+
+ /* Iterate until all lanes have been set, and thus cleared from VALUE. */
+ while (value) {
+ shift = ctz64(value) & (63 & -16);
+ tcg_out_insn(s, 3405, MOVK, type, rd, value >> shift, shift);
value &= ~(0xffffUL << shift);
- insn = I3405_MOVK;
- } while (value);
+ }
}
static inline void tcg_out_ldst_r(TCGContext *s,
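
The MOVZ-vs-MOVN choice in the hunk above boils down to comparing lane counts: a MOVZ-based sequence needs one instruction per 16-bit lane that is not 0x0000, while a MOVN-based sequence needs one per lane that is not 0xffff, and the wantinv counter is exactly the difference between those two costs. Below is a minimal standalone sketch of that cost comparison (not QEMU code; count_insns() and the test constants are made up for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Cost of building VALUE with MOVZ+MOVK (use_movn == 0) or with
   MOVN+MOVK (use_movn != 0): one instruction per 16-bit lane that
   cannot be left to the initial instruction.  */
static int count_insns(uint64_t value, int use_movn)
{
    uint64_t skip = use_movn ? 0xffffull : 0x0000ull;
    int i, n = 0;

    for (i = 0; i < 64; i += 16) {
        if (((value >> i) & 0xffff) != skip) {
            n++;
        }
    }
    /* Even an all-skippable constant (0 or -1) needs one instruction.  */
    return n ? n : 1;
}

int main(void)
{
    uint64_t tests[] = { 0xfffffffffffff123ull, 0x0000000000001234ull,
                         0xffff0000ffff0000ull, 0xffffffffffffffffull };
    size_t i;

    for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
        int zc = count_insns(tests[i], 0);   /* MOVZ + MOVKs */
        int nc = count_insns(tests[i], 1);   /* MOVN + MOVKs */
        printf("%#018" PRIx64 ": MOVZ path %d insns, MOVN path %d insns -> %s\n",
               tests[i], zc, nc, nc < zc ? "MOVN" : "MOVZ");
    }
    return 0;
}

As in the patch, MOVN is chosen only when it is strictly cheaper; ties fall back to MOVZ.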
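
The emission loop itself walks the constant lane by lane with ctz64, skipping 0x0000 lanes and clearing each lane from the working value once it has been emitted. The following self-checking sketch (again not QEMU code; build_with_movz_movk() is a made-up name, and __builtin_ctzll stands in for ctz64) simulates what the MOVZ/MOVK path computes:

#include <assert.h>
#include <stdint.h>

static uint64_t build_with_movz_movk(uint64_t value)
{
    uint64_t reg = 0;
    int first = 1;

    do {
        /* Lowest non-zero lane; "& (63 & -16)" rounds the bit index down
           to a 16-bit lane boundary, mirroring "ctz64(value) & (63 & -16)".
           Guard against value == 0, where __builtin_ctzll is undefined.  */
        int shift = value ? (__builtin_ctzll(value) & (63 & -16)) : 0;
        uint64_t lane = (value >> shift) & 0xffff;

        if (first) {
            reg = lane << shift;              /* MOVZ: other lanes zeroed */
            first = 0;
        } else {
            reg &= ~(0xffffull << shift);     /* MOVK: replace one lane */
            reg |= lane << shift;
        }
        value &= ~(0xffffull << shift);       /* lane done, drop it */
    } while (value);

    return reg;
}

int main(void)
{
    assert(build_with_movz_movk(0x12345678deadbeefull) == 0x12345678deadbeefull);
    assert(build_with_movz_movk(0x0000ffff00000000ull) == 0x0000ffff00000000ull);
    assert(build_with_movz_movk(0) == 0);
    return 0;
}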