author     Andrew MacLeod <amacleod@cygnus.com>       1999-01-21 11:33:44 +0000
committer  Andrew Macleod <amacleod@gcc.gnu.org>      1999-01-21 11:33:44 +0000
commit     fbe1758db29ec4459a0a9bce7827597d37b6523a (patch)
tree       2dd0db2d241549b740b9698c35b5d5fabd5b22e5 /gcc
parent     0003feb203c94e5ed853432b8c8be9079761244e (diff)
expr.c (MOVE_BY_PIECES_P): Define condition for deciding to use move_by_pieces.
* expr.c (MOVE_BY_PIECES_P): Define condition for deciding to use
move_by_pieces.
(MOVE_MAX_PIECES): Define maximum number of bytes to move at once.
(USE_LOAD_POST_INCREMENT, USE_LOAD_PRE_DECREMENT): Define defaults.
(USE_STORE_POST_INCREMENT, USE_STORE_PRE_DECREMENT): Define defaults.
(move_by_pieces): Use new macros.
(emit_block_move): Use new macros.
(clear_by_pieces): Use new macros.
(clear_storage): Use new macros.
(emit_push_insn): Use new macros.
(expand_expr): Use new macros.
* config/sh/sh.h (USE_LOAD_POST_INCREMENT, USE_LOAD_PRE_DECREMENT):
Define.
(USE_STORE_POST_INCREMENT, USE_STORE_PRE_DECREMENT): Define.
(MOVE_BY_PIECES_P): Define based on alignment and TARGET_SMALLCODE.
(MOVE_MAX_PIECES): move 8 bytes on SH4.
* tm.texi(MOVE_BY_PIECES_P, MOVE_MAX_PIECES, USE_LOAD_POST_INCREMENT,
USE_LOAD_PRE_DECREMENT, USE_STORE_POST_INCREMENT,
USE_STORE_PRE_DECREMENT): Describe new macros.
From-SVN: r24801
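
The new hooks are target-configuration macros: a port's header can override the defaults that expr.c now supplies, steering both when GCC copies a block inline with move_by_pieces and which addressing forms that routine may use. The sketch below is illustrative only and is not part of this commit; TARGET_TUNE_FOR_SIZE is a made-up predicate standing in for a real target flag (the SH port uses TARGET_SMALLCODE, as the diff below shows).

    /* Illustrative only -- how a hypothetical port header might define
       the new macros.  TARGET_TUNE_FOR_SIZE is a made-up stand-in for a
       real target flag such as TARGET_SMALLCODE on SH.  */

    /* Widest unit move_by_pieces may use for a single load or store.  */
    #define MOVE_MAX_PIECES (TARGET_TUNE_FOR_SIZE ? 4 : 8)

    /* Copy inline only when the estimated instruction count is small;
       otherwise emit_block_move falls back to a movstr pattern or a
       library call.  */
    #define MOVE_BY_PIECES_P(SIZE, ALIGN) \
      (move_by_pieces_ninsns (SIZE, ALIGN) < (TARGET_TUNE_FOR_SIZE ? 2 : 16))

    /* Tell move_by_pieces which auto-increment addressing forms are
       actually profitable, per mode, independent of HAVE_POST_INCREMENT
       and HAVE_PRE_DECREMENT.  */
    #define USE_LOAD_POST_INCREMENT(MODE)   ((MODE) != DImode)
    #define USE_STORE_PRE_DECREMENT(MODE)   0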
Diffstat (limited to 'gcc')
-rw-r--r--   gcc/ChangeLog       |  22
-rw-r--r--   gcc/config/sh/sh.h  |  17
-rw-r--r--   gcc/expr.c          |  95
-rw-r--r--   gcc/tm.texi         |  36
4 files changed, 143 insertions, 27 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 9bc917b..69e53eb 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,25 @@
+Thu Jan 21 14:18:04 EST 1999 Andrew MacLeod <amacleod@cygnus.com>
+
+	* expr.c (MOVE_BY_PIECES_P): Define condition for deciding to use
+	move_by_pieces.
+	(MOVE_MAX_PIECES): Define maximum number of bytes to move at once.
+	(USE_LOAD_POST_INCREMENT, USE_LOAD_PRE_DECREMENT): Define defaults.
+	(USE_STORE_POST_INCREMENT, USE_STORE_PRE_DECREMENT): Define defaults.
+	(move_by_pieces): Use new macros.
+	(emit_block_move): Use new macros.
+	(clear_by_pieces): Use new macros.
+	(clear_storage): Use new macros.
+	(emit_push_insn): Use new macros.
+	(expand_expr): Use new macros.
+	* config/sh/sh.h (USE_LOAD_POST_INCREMENT, USE_LOAD_PRE_DECREMENT):
+	Define.
+	(USE_STORE_POST_INCREMENT, USE_STORE_PRE_DECREMENT): Define.
+	(MOVE_BY_PIECES_P): Define based on alignment and TARGET_SMALLCODE.
+	(MOVE_MAX_PIECES): move 8 bytes on SH4.
+	* tm.texi(MOVE_BY_PIECES_P, MOVE_MAX_PIECES, USE_LOAD_POST_INCREMENT,
+	USE_LOAD_PRE_DECREMENT, USE_STORE_POST_INCREMENT,
+	USE_STORE_PRE_DECREMENT): Describe new macros.
+
 Thu Jan 21 14:13:31 1999  Vladimir N. Makarov  <vmakarov@cygnus.com>
 
 	* varasm.c (output_constant_pool): Use floor_log2 instead of
diff --git a/gcc/config/sh/sh.h b/gcc/config/sh/sh.h
index e2bf8c7..260e9cb 100644
--- a/gcc/config/sh/sh.h
+++ b/gcc/config/sh/sh.h
@@ -1155,6 +1155,17 @@ extern struct rtx_def *sh_builtin_saveregs ();
 /*#define HAVE_POST_DECREMENT 1*/
 #define HAVE_PRE_DECREMENT 1
 
+#define USE_LOAD_POST_INCREMENT(mode)    ((mode == SImode || mode == DImode) \
+                                           ? 0 : 1)
+#define USE_LOAD_PRE_DECREMENT(mode)     0
+#define USE_STORE_POST_INCREMENT(mode)   0
+#define USE_STORE_PRE_DECREMENT(mode)    ((mode == SImode || mode == DImode) \
+                                           ? 0 : 1)
+
+#define MOVE_BY_PIECES_P(SIZE, ALIGN)  (move_by_pieces_ninsns (SIZE, ALIGN) \
+                                        < (TARGET_SMALLCODE ? 2 : \
+                                           ((ALIGN >= 4) ? 16 : 2)))
+
 /* Macros to check register numbers against specific register classes.  */
 
 /* These assume that REGNO is a hard or pseudo reg number.
@@ -1509,6 +1520,10 @@ extern struct rtx_def *sh_builtin_saveregs ();
    in one reasonably fast instruction.  */
 #define MOVE_MAX 4
 
+/* Max number of bytes we want move_by_pieces to be able to copy
+   efficiently.  */
+#define MOVE_MAX_PIECES (TARGET_SH4 ? 8 : 4)
+
 /* Define if operations between registers always perform the operation
    on the full register even if a narrower mode is specified.  */
 #define WORD_REGISTER_OPERATIONS
@@ -2071,8 +2086,6 @@ extern int rtx_equal_function_value_matters;
 extern struct rtx_def *fpscr_rtx;
 extern struct rtx_def *get_fpscr_rtx ();
 
-
-#define MOVE_RATIO (TARGET_SMALLCODE ? 2 : 16)
 /* Instructions with unfilled delay slots take up an extra two bytes for
    the nop in the delay slot.  */
diff --git a/gcc/expr.c b/gcc/expr.c
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -210,6 +210,13 @@ static char direct_store[NUM_MACHINE_MODES];
 #endif
 #endif
 
+/* This macro is used to determine whether move_by_pieces should be called
+   to perform a structure copy.  */
+#ifndef MOVE_BY_PIECES_P
+#define MOVE_BY_PIECES_P(SIZE, ALIGN) (move_by_pieces_ninsns \
+                                       (SIZE, ALIGN) < MOVE_RATIO)
+#endif
+
 /* This array records the insn_code of insns to perform block moves.  */
 enum insn_code movstr_optab[NUM_MACHINE_MODES];
@@ -1383,6 +1390,38 @@ convert_modes (mode, oldmode, x, unsignedp)
   return temp;
 }
 
+/* This macro is used to determine what the largest unit size that
+   move_by_pieces can use is.  */
+
+/* MOVE_MAX_PIECES is the number of bytes at a time which we can
+   move efficiently, as opposed to MOVE_MAX which is the maximum
+   number of bhytes we can move with a single instruction.  */
+
+#ifndef MOVE_MAX_PIECES
+#define MOVE_MAX_PIECES   MOVE_MAX
+#endif
+
+/* Some architectures do not have complete pre/post increment/decrement
+   instruction sets, or only move some modes efficiently.  these macros
+   allow us to fine tune move_by_pieces for these targets.  */
+
+#ifndef USE_LOAD_POST_INCREMENT
+#define USE_LOAD_POST_INCREMENT(MODE)   HAVE_POST_INCREMENT
+#endif
+
+#ifndef USE_LOAD_PRE_DECREMENT
+#define USE_LOAD_PRE_DECREMENT(MODE)    HAVE_PRE_DECREMENT
+#endif
+
+#ifndef USE_STORE_POST_INCREMENT
+#define USE_STORE_POST_INCREMENT(MODE)  HAVE_POST_INCREMENT
+#endif
+
+#ifndef USE_STORE_PRE_DECREMENT
+#define USE_STORE_PRE_DECREMENT(MODE)   HAVE_PRE_DECREMENT
+#endif
+
 /* Generate several move instructions to copy LEN bytes from block FROM to
    block TO.  (These are MEM rtx's with BLKmode).
    The caller must pass FROM and TO
@@ -1396,7 +1435,9 @@ move_by_pieces (to, from, len, align)
 {
   struct move_by_pieces data;
   rtx to_addr = XEXP (to, 0), from_addr = XEXP (from, 0);
-  int max_size = MOVE_MAX + 1;
+  int max_size = MOVE_MAX_PIECES + 1;
+  enum machine_mode mode = VOIDmode, tmode;
+  enum insn_code icode;
 
   data.offset = 0;
   data.to_addr = to_addr;
@@ -1427,13 +1468,19 @@ move_by_pieces (to, from, len, align)
   if (!(data.autinc_from && data.autinc_to)
       && move_by_pieces_ninsns (len, align) > 2)
     {
-      if (HAVE_PRE_DECREMENT && data.reverse && ! data.autinc_from)
+      /* Find the mode of the largest move... */
+      for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+           tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+        if (GET_MODE_SIZE (tmode) < max_size)
+          mode = tmode;
+
+      if (USE_LOAD_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_from)
         {
           data.from_addr = copy_addr_to_reg (plus_constant (from_addr, len));
           data.autinc_from = 1;
           data.explicit_inc_from = -1;
         }
-      if (HAVE_POST_INCREMENT && ! data.autinc_from)
+      if (USE_LOAD_POST_INCREMENT (mode) && ! data.autinc_from)
         {
           data.from_addr = copy_addr_to_reg (from_addr);
           data.autinc_from = 1;
@@ -1441,13 +1488,13 @@
         }
       if (!data.autinc_from && CONSTANT_P (from_addr))
         data.from_addr = copy_addr_to_reg (from_addr);
-      if (HAVE_PRE_DECREMENT && data.reverse && ! data.autinc_to)
+      if (USE_STORE_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_to)
        {
          data.to_addr = copy_addr_to_reg (plus_constant (to_addr, len));
          data.autinc_to = 1;
          data.explicit_inc_to = -1;
        }
-      if (HAVE_POST_INCREMENT && ! data.reverse && ! data.autinc_to)
+      if (USE_STORE_POST_INCREMENT (mode) && ! data.reverse && ! data.autinc_to)
        {
          data.to_addr = copy_addr_to_reg (to_addr);
          data.autinc_to = 1;
@@ -1466,9 +1513,6 @@ move_by_pieces (to, from, len, align)
 
   while (max_size > 1)
     {
-      enum machine_mode mode = VOIDmode, tmode;
-      enum insn_code icode;
-
       for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
            tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
         if (GET_MODE_SIZE (tmode) < max_size)
@@ -1622,8 +1666,7 @@ emit_block_move (x, y, size, align)
   if (size == 0)
     abort ();
 
-  if (GET_CODE (size) == CONST_INT
-      && (move_by_pieces_ninsns (INTVAL (size), align) < MOVE_RATIO))
+  if (GET_CODE (size) == CONST_INT && MOVE_BY_PIECES_P (INTVAL (size), align))
     move_by_pieces (x, y, INTVAL (size), align);
   else
     {
@@ -2217,7 +2260,9 @@ clear_by_pieces (to, len, align)
 {
   struct clear_by_pieces data;
   rtx to_addr = XEXP (to, 0);
-  int max_size = MOVE_MAX + 1;
+  int max_size = MOVE_MAX_PIECES + 1;
+  enum machine_mode mode = VOIDmode, tmode;
+  enum insn_code icode;
 
   data.offset = 0;
   data.to_addr = to_addr;
@@ -2240,13 +2285,19 @@ clear_by_pieces (to, len, align)
   if (!data.autinc_to
       && move_by_pieces_ninsns (len, align) > 2)
     {
-      if (HAVE_PRE_DECREMENT && data.reverse && ! data.autinc_to)
+      /* Determine the main mode we'll be using */
+      for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+           tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+        if (GET_MODE_SIZE (tmode) < max_size)
+          mode = tmode;
+
+      if (USE_STORE_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_to)
        {
          data.to_addr = copy_addr_to_reg (plus_constant (to_addr, len));
          data.autinc_to = 1;
          data.explicit_inc_to = -1;
        }
-      if (HAVE_POST_INCREMENT && ! data.reverse && ! data.autinc_to)
+      if (USE_STORE_POST_INCREMENT (mode) && ! data.reverse && ! data.autinc_to)
        {
          data.to_addr = copy_addr_to_reg (to_addr);
          data.autinc_to = 1;
@@ -2265,9 +2316,6 @@ clear_by_pieces (to, len, align)
 
   while (max_size > 1)
     {
-      enum machine_mode mode = VOIDmode, tmode;
-      enum insn_code icode;
-
       for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
            tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
         if (GET_MODE_SIZE (tmode) < max_size)
@@ -2351,7 +2399,7 @@ clear_storage (object, size, align)
       size = protect_from_queue (size, 0);
 
       if (GET_CODE (size) == CONST_INT
-          && (move_by_pieces_ninsns (INTVAL (size), align) < MOVE_RATIO))
+          && MOVE_BY_PIECES_P (INTVAL (size), align))
         clear_by_pieces (object, INTVAL (size), align);
 
       else
@@ -2839,8 +2887,7 @@ emit_push_insn (x, mode, type, size, align, partial, reg, extra,
       if (args_addr == 0
           && GET_CODE (size) == CONST_INT
           && skip == 0
-         && (move_by_pieces_ninsns ((unsigned) INTVAL (size) - used, align)
-             < MOVE_RATIO)
+         && (MOVE_BY_PIECES_P ((unsigned) INTVAL (size) - used, align))
          /* Here we avoid the case of a structure whose weak alignment
             forces many pushes of a small amount of data,
            and such small pushes do rounding that causes trouble.  */
@@ -2938,8 +2985,7 @@ emit_push_insn (x, mode, type, size, align, partial, reg, extra,
 
          /* TEMP is the address of the block.  Copy the data there.  */
          if (GET_CODE (size) == CONST_INT
-             && (move_by_pieces_ninsns ((unsigned) INTVAL (size), align)
-                 < MOVE_RATIO))
+             && (MOVE_BY_PIECES_P ((unsigned) INTVAL (size), align)))
            {
              move_by_pieces (gen_rtx_MEM (BLKmode, temp), xinner,
                              INTVAL (size), align);
@@ -6048,10 +6094,9 @@ expand_expr (exp, target, tmode, modifier)
                     && ! (target != 0 && safe_from_p (target, exp, 1)))
                  || TREE_ADDRESSABLE (exp)
                  || (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
-                     && (move_by_pieces_ninsns
-                         (TREE_INT_CST_LOW (TYPE_SIZE (type))/BITS_PER_UNIT,
-                          TYPE_ALIGN (type) / BITS_PER_UNIT)
-                         >= MOVE_RATIO)
+                     && (!MOVE_BY_PIECES_P
+                         (TREE_INT_CST_LOW (TYPE_SIZE (type))/BITS_PER_UNIT,
+                          TYPE_ALIGN (type) / BITS_PER_UNIT))
                      && ! mostly_zeros_p (exp))))
              || (modifier == EXPAND_INITIALIZER && TREE_CONSTANT (exp)))
            {
diff --git a/gcc/tm.texi b/gcc/tm.texi
index cba1b66..2d7cd29 100644
--- a/gcc/tm.texi
+++ b/gcc/tm.texi
@@ -4754,6 +4754,42 @@ the corresponding number of memory-to-memory
 @emph{sequences}.
 
 If you don't define this, a reasonable default is used.
 
+@findex MOVE_BY_PIECES_P
+@item MOVE_BY_PIECES_P (@var{size}, @var{alignment})
+A C expression used to determine whether @code{move_by_pieces} will be used to
+copy a chunk of memory, or whether some other block move mechanism
+will be used.  Defaults to 1 if @code{move_by_pieces_ninsns} returns less
+than @code{MOVE_RATIO}.
+
+@findex MOVE_MAX_PIECES
+@item MOVE_MAX_PIECES
+A C expression used by @code{move_by_pieces} to determine the largest unit
+a load or store used to copy memory is.  Defaults to @code{MOVE_MAX}.
+
+@findex USE_LOAD_POST_INCREMENT
+@item USE_LOAD_POST_INCREMENT (@var{mode})
+A C expression used to determine whether a load postincrement is
+a good thing for @code{move_by_pieces} to use for a given mode.  Defaults
+to the value of @code{HAVE_POST_INCREMENT}.
+
+@findex USE_LOAD_PRE_INCREMENT
+@item USE_LOAD_PRE_INCREMENT (@var{mode})
+A C expression used to determine whether a load preincrement is
+a good thing for @code{move_by_pieces} to use for a given mode.  Defaults
+to the value of @code{HAVE_PRE_INCREMENT}.
+
+@findex USE_STORE_POST_INCREMENT
+@item USE_STORE_POST_INCREMENT (@var{mode})
+A C expression used to determine whether a store postincrement is
+a good thing for @code{move_by_pieces} to use for a given mode.  Defaults
+to the value of @code{HAVE_POST_INCREMENT}.
+
+@findex USE_STORE_PRE_INCREMENT
+@item USE_STORE_PRE_INCREMENT (@var{mode})
+This macro is used to determine whether a store preincrement is
+a good thing for @code{move_by_pieces} to use for a given mode.  Defaults
+to the value of @code{HAVE_PRE_INCREMENT}.
+
 @findex NO_FUNCTION_CSE
 @item NO_FUNCTION_CSE
 Define this macro if it is as good or better to call a constant
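
As background for the mode-selection loop the patch hoists out of the inner copy loop (GET_CLASS_NARROWEST_MODE / GET_MODE_WIDER_MODE): it simply remembers the widest integer mode whose size is still below max_size, i.e. at most MOVE_MAX_PIECES bytes, and that mode is what the USE_*_INCREMENT / USE_*_DECREMENT queries are asked about. Below is a minimal standalone sketch of the same idea, using made-up byte sizes in place of GCC's machine modes; it is not GCC code.

    #include <stdio.h>

    /* Stand-ins for GCC's integer modes from narrowest to widest
       (QImode, HImode, SImode, DImode); sizes are in bytes.  */
    static const int   mode_size[] = { 1, 2, 4, 8 };
    static const char *mode_name[] = { "QImode", "HImode", "SImode", "DImode" };

    /* Same shape as the loop added in move_by_pieces/clear_by_pieces:
       keep the widest mode whose size is still below max_size.  */
    static int
    widest_mode_below (int max_size)
    {
      int mode = -1;              /* "VOIDmode": nothing found yet.  */
      int tmode;

      for (tmode = 0; tmode < 4; tmode++)
        if (mode_size[tmode] < max_size)
          mode = tmode;
      return mode;
    }

    int
    main (void)
    {
      /* max_size is MOVE_MAX_PIECES + 1, so an SH4-style value of 8
         picks DImode while the default of 4 picks SImode.  */
      printf ("%s\n", mode_name[widest_mode_below (8 + 1)]);   /* DImode */
      printf ("%s\n", mode_name[widest_mode_below (4 + 1)]);   /* SImode */
      return 0;
    }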