aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeffrey A Law <law@cygnus.com>1998-08-26 17:06:03 +0000
committerJeff Law <law@gcc.gnu.org>1998-08-26 11:06:03 -0600
commit425c08a151458c1c44c27d191ef8d46756980b06 (patch)
tree9f3ebc504caa79d3cdd95e4d5b90252503ede2bd
parentcc1f775259572ef1e0158a799d436f99782f1701 (diff)
downloadgcc-425c08a151458c1c44c27d191ef8d46756980b06.zip
gcc-425c08a151458c1c44c27d191ef8d46756980b06.tar.gz
gcc-425c08a151458c1c44c27d191ef8d46756980b06.tar.bz2
calls.c (expand_call): Use bitfield instructions to extract/deposit word sized hunks when...
* calls.c (expand_call): Use bitfield instructions to extract/deposit word sized hunks when loading unaligned args into registers. From-SVN: r22008
-rw-r--r--gcc/ChangeLog3
-rw-r--r--gcc/calls.c34
2 files changed, 17 insertions, 20 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 06a8f16..223e78a 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -14,6 +14,9 @@ Wed Aug 26 09:30:59 1998 Nick Clifton <nickc@cygnus.com>
Wed Aug 26 12:57:09 1998 Jeffrey A Law (law@cygnus.com)
+ * calls.c (expand_call): Use bitfield instructions to extract/deposit
+ word sized hunks when loading unaligned args into registers.
+
* haifa-sched.c (sched_analyze_insn): Only create scheduling
barriers for LOOP, EH and SETJMP notes on the loop_notes list.
diff --git a/gcc/calls.c b/gcc/calls.c
index 233b1a7..728d6d6 100644
--- a/gcc/calls.c
+++ b/gcc/calls.c
@@ -1800,16 +1800,16 @@ expand_call (exp, target, ignore)
{
rtx reg = gen_reg_rtx (word_mode);
rtx word = operand_subword_force (args[i].value, j, BLKmode);
- int bitsize = TYPE_ALIGN (TREE_TYPE (args[i].tree_value));
- int bitpos;
+ int bitsize = MIN (bytes * BITS_PER_UNIT, BITS_PER_WORD);
+ int bitalign = TYPE_ALIGN (TREE_TYPE (args[i].tree_value));
args[i].aligned_regs[j] = reg;
- /* Clobber REG and move each partword into it. Ensure we don't
- go past the end of the structure. Note that the loop below
- works because we've already verified that padding
- and endianness are compatible.
+ /* There is no need to restrict this code to loading items
+ in TYPE_ALIGN sized hunks. The bitfield instructions can
+ load up entire word sized registers efficiently.
+ ??? This may not be needed anymore.
We used to emit a clobber here but that doesn't let later
passes optimize the instructions we emit. By storing 0 into
the register later passes know the first AND to zero out the
@@ -1818,20 +1818,14 @@ expand_call (exp, target, ignore)
emit_move_insn (reg, const0_rtx);
- for (bitpos = 0;
- bitpos < BITS_PER_WORD && bytes > 0;
- bitpos += bitsize, bytes -= bitsize / BITS_PER_UNIT)
- {
- int xbitpos = bitpos + big_endian_correction;
-
- store_bit_field (reg, bitsize, xbitpos, word_mode,
- extract_bit_field (word, bitsize, bitpos, 1,
- NULL_RTX, word_mode,
- word_mode,
- bitsize / BITS_PER_UNIT,
- BITS_PER_WORD),
- bitsize / BITS_PER_UNIT, BITS_PER_WORD);
- }
+ bytes -= bitsize / BITS_PER_UNIT;
+ store_bit_field (reg, bitsize, big_endian_correction, word_mode,
+ extract_bit_field (word, bitsize, 0, 1,
+ NULL_RTX, word_mode,
+ word_mode,
+ bitalign / BITS_PER_UNIT,
+ BITS_PER_WORD),
+ bitalign / BITS_PER_UNIT, BITS_PER_WORD);
}
}