author     Roger Sayle <roger@nextmovesoftware.com>   2021-08-17 14:50:54 +0100
committer  Roger Sayle <roger@nextmovesoftware.com>   2021-08-17 14:53:04 +0100
commit     408579c9c9b8fee20e1d8114489ce2b93872767c (patch)
tree       4283a897742f5041bb10b4778665e29420188a39 /gcc
parent     f8d535f3fec81c1cc84e22df5500e693544ec65b (diff)
Improved handling of MULT_EXPR in bit CCP.
This patch allows GCC to constant fold (i | (i<<16)) | ((i<<24) | (i<<8)),
where i is an unsigned char, or the equivalent (i*65537) | (i*16777472), to
i*16843009. The trick is to teach tree_nonzero_bits which bits may be
set in the result of a multiplication by a constant given which bits are
potentially set in the operands. This allows the optimizations recently
added to match.pd to catch more cases.
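
As a quick standalone sanity check (not part of the patch), the equivalence
of all three forms can be verified exhaustively, since an unsigned char takes
only 256 values:

#include <assert.h>

int main (void)
{
  for (unsigned i = 0; i < 256; i++)
    {
      unsigned shifts = (i | (i << 16)) | ((i << 24) | (i << 8));
      unsigned mults = (i * 65537u) | (i * 16777472u);
      unsigned folded = i * 16843009u;	/* 0x01010101 */
      assert (shifts == mults && mults == folded);
    }
  return 0;
}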
The required mask/value pair from a multiplication may be calculated using
a classical shift-and-add algorithm, given that we already have
implementations for both addition and shift by a constant. To keep this
optimization "cheap", this functionality is only used if the constant
multiplier has only a few bits set (unless flag_expensive_optimizations),
and we provide a special-case fast-path implementation for the common case
where the (non-constant) operand has no bits that are guaranteed to be set.
I have no evidence that this functionality causes performance issues; it's
just that sparse multipliers provide the largest benefit to CCP.
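
To illustrate the idea, the sketch below (plain 32-bit C with made-up names,
not GCC's wide-int API) shows how a value/mask pair, where a set mask bit
means "bit unknown", propagates through multiplication by a constant via
shift-and-add; it mirrors the structure of bit_value_mult_const in the patch:

#include <assert.h>

/* A value/mask pair: VAL holds the known bits, a set bit in MASK
   means that bit is unknown.  */
typedef struct { unsigned val; unsigned mask; } bits;

/* Known-bits addition, as used by bit CCP for PLUS_EXPR: compute the
   minimal sum (unknown bits all zero) and the maximal sum (unknown
   bits all one); bits where the two disagree may be changed by
   carries and so become unknown.  */
static bits
bits_add (bits a, bits b)
{
  unsigned lo = a.val + b.val;
  unsigned hi = (a.val | a.mask) + (b.val | b.mask);
  unsigned mask = a.mask | b.mask | (lo ^ hi);
  bits r = { lo & ~mask, mask };
  return r;
}

/* Multiply the pair R by the constant C by shift-and-add: one shifted
   term per set bit of C, accumulated with bits_add.  */
static bits
bits_mult_const (bits r, unsigned c)
{
  bits sum = { 0, 0 };
  while (c != 0)
    {
      int bitpos = __builtin_ctz (c);	/* lowest set bit of C */
      bits term = { (r.val & ~r.mask) << bitpos, r.mask << bitpos };
      sum = bits_add (sum, term);
      c &= c - 1;			/* clear that bit of C */
    }
  return sum;
}

int main (void)
{
  /* Fully known operand: 1 * 5 is the known value 5.  */
  bits one = { 1, 0 };
  assert (bits_mult_const (one, 5).val == 5);

  /* unsigned char operand (bits 0-7 unknown) times 0x01010101:
     every result bit may be set, as 255 * 16843009 == 0xffffffff.  */
  bits uc = { 0, 0xff };
  assert (bits_mult_const (uc, 16843009u).mask == 0xffffffffu);
  return 0;
}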
2021-08-17 Roger Sayle <roger@nextmovesoftware.com>
gcc/ChangeLog:
* tree-ssa-ccp.c (bit_value_mult_const): New helper function to
calculate the mask-value pair result of a multiplication by an
unsigned constant.
(bit_value_binop) [MULT_EXPR]: Call it from here for
multiplications by (sparse) non-negative constants.
gcc/testsuite/ChangeLog:
* gcc.dg/fold-ior-5.c: New test case.
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/testsuite/gcc.dg/fold-ior-5.c |  17
-rw-r--r--  gcc/tree-ssa-ccp.c                | 105
2 files changed, 104 insertions, 18 deletions
diff --git a/gcc/testsuite/gcc.dg/fold-ior-5.c b/gcc/testsuite/gcc.dg/fold-ior-5.c
new file mode 100644
index 0000000..8de5697
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/fold-ior-5.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+
+unsigned int test_ior(unsigned char i)
+{
+  return (i | (i<<16)) | ((i<<24) | (i<<8));
+}
+
+unsigned int test_xor(unsigned char i)
+{
+  return (i ^ (i<<16)) ^ ((i<<24) ^ (i<<8));
+}
+
+/* { dg-final { scan-tree-dump-not " \\^ " "optimized" } } */
+/* { dg-final { scan-tree-dump-not " \\| " "optimized" } } */
+/* { dg-final { scan-tree-dump-times " \\* 16843009" 2 "optimized" } } */
+
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 003c9c2..8e4d8ae 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -1389,6 +1389,66 @@ bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
     }
 }
 
+/* Determine the mask pair *VAL and *MASK from multiplying the
+   argument mask pair RVAL, RMASK by the unsigned constant C.  */
+void
+bit_value_mult_const (signop sgn, int width,
+                      widest_int *val, widest_int *mask,
+                      const widest_int &rval, const widest_int &rmask,
+                      widest_int c)
+{
+  widest_int sum_mask = 0;
+
+  /* Ensure rval_lo only contains known bits.  */
+  widest_int rval_lo = wi::bit_and_not (rval, rmask);
+
+  if (rval_lo != 0)
+    {
+      /* General case (some bits of multiplicand are known set).  */
+      widest_int sum_val = 0;
+      while (c != 0)
+        {
+          /* Determine the lowest bit set in the multiplier.  */
+          int bitpos = wi::ctz (c);
+          widest_int term_mask = rmask << bitpos;
+          widest_int term_val = rval_lo << bitpos;
+
+          /* sum += term.  */
+          widest_int lo = sum_val + term_val;
+          widest_int hi = (sum_val | sum_mask) + (term_val | term_mask);
+          sum_mask |= term_mask | (lo ^ hi);
+          sum_val = lo;
+
+          /* Clear this bit in the multiplier.  */
+          c ^= wi::lshift (1, bitpos);
+        }
+      /* Correctly extend the result value.  */
+      *val = wi::ext (sum_val, width, sgn);
+    }
+  else
+    {
+      /* Special case (no bits of multiplicand are known set).  */
+      while (c != 0)
+        {
+          /* Determine the lowest bit set in the multiplier.  */
+          int bitpos = wi::ctz (c);
+          widest_int term_mask = rmask << bitpos;
+
+          /* sum += term.  */
+          widest_int hi = sum_mask + term_mask;
+          sum_mask |= term_mask | hi;
+
+          /* Clear this bit in the multiplier.  */
+          c ^= wi::lshift (1, bitpos);
+        }
+      *val = 0;
+    }
+
+  /* Correctly extend the result mask.  */
+  *mask = wi::ext (sum_mask, width, sgn);
+}
+
+
 /* Apply the operation CODE in type TYPE to the value, mask pairs
    R1VAL, R1MASK and R2VAL, R2MASK representing a values of type R1TYPE
    and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
@@ -1533,24 +1593,33 @@ bit_value_binop (enum tree_code code, signop sgn, int width,
       }
 
     case MULT_EXPR:
-      {
-        /* Just track trailing zeros in both operands and transfer
-           them to the other.  */
-        int r1tz = wi::ctz (r1val | r1mask);
-        int r2tz = wi::ctz (r2val | r2mask);
-        if (r1tz + r2tz >= width)
-          {
-            *mask = 0;
-            *val = 0;
-          }
-        else if (r1tz + r2tz > 0)
-          {
-            *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
-                             width, sgn);
-            *val = 0;
-          }
-        break;
-      }
+      if (r2mask == 0
+          && !wi::neg_p (r2val, sgn)
+          && (flag_expensive_optimizations || wi::popcount (r2val) < 8))
+        bit_value_mult_const (sgn, width, val, mask, r1val, r1mask, r2val);
+      else if (r1mask == 0
+               && !wi::neg_p (r1val, sgn)
+               && (flag_expensive_optimizations || wi::popcount (r1val) < 8))
+        bit_value_mult_const (sgn, width, val, mask, r2val, r2mask, r1val);
+      else
+        {
+          /* Just track trailing zeros in both operands and transfer
+             them to the other.  */
+          int r1tz = wi::ctz (r1val | r1mask);
+          int r2tz = wi::ctz (r2val | r2mask);
+          if (r1tz + r2tz >= width)
+            {
+              *mask = 0;
+              *val = 0;
+            }
+          else if (r1tz + r2tz > 0)
+            {
+              *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
+                               width, sgn);
+              *val = 0;
+            }
+        }
+      break;
 
     case EQ_EXPR:
     case NE_EXPR:
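
To see why this helps the new match.pd patterns, it's enough to look at the
masks the special-case path computes for the two halves of the motivating
example. The snippet below (a transliteration of the fast path into plain
unsigned arithmetic, with invented names rather than GCC's wide-int API)
does just that for an unsigned char operand:

#include <stdio.h>

/* Fast path of bit_value_mult_const, transliterated: when no bits of
   the multiplicand are known set, only the mask needs tracking.  */
static unsigned
mask_mult_const_fast (unsigned rmask, unsigned c)
{
  unsigned sum_mask = 0;
  while (c != 0)
    {
      int bitpos = __builtin_ctz (c);	/* lowest set bit of C */
      unsigned term_mask = rmask << bitpos;
      sum_mask |= term_mask | (sum_mask + term_mask);	/* sum += term */
      c &= c - 1;			/* clear that bit of C */
    }
  return sum_mask;
}

int main (void)
{
  /* unsigned char operand: bits 0-7 unknown, i.e. mask 0xff.  */
  printf ("i * 65537:    mask = 0x%08x\n", mask_mult_const_fast (0xff, 65537u));
  printf ("i * 16777472: mask = 0x%08x\n", mask_mult_const_fast (0xff, 16777472u));
  return 0;
}

The two masks come out as 0x00ff00ff and 0xff00ff00. Their nonzero bits are
disjoint, which is precisely the condition that lets the IOR (or XOR) of the
two products be rewritten as a PLUS and the whole expression be folded to a
single multiplication by 16843009.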