diff options
-rw-r--r-- | gcc/ChangeLog | 7
-rw-r--r-- | gcc/config/rs6000/rs6000.c | 37
2 files changed, 39 insertions, 5 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 1db0c56..2a47a1b 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,10 @@ +2004-01-20 Hartmut Penner <hpenner@de.ibm.com> + + * gcc/config/rs6000/rs6000.c (function_arg): Handle + vector registers specially in functions without a prototype. + (function_arg_advance): Vector parameters always get + GPRs allocated for the linux64 target. + 2004-01-20 Kazuhiro Inaoka <inaoka.kazuhiro@renesas.com> * config/m32r/m32r.h (TARGET_M32R2). Test for TARGET_M32R2_MASK diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c index e19b007..5b079ac 100644 --- a/gcc/config/rs6000/rs6000.c +++ b/gcc/config/rs6000/rs6000.c @@ -3941,10 +3941,12 @@ function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode, if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)) cum->vregno++; - /* In variable-argument functions, vector arguments get GPRs allocated - even if they are going to be passed in a vector register. */ - if (cum->stdarg && DEFAULT_ABI != ABI_V4) - { + /* PowerPC64 Linux and AIX allocate GPRs for a vector argument + even if it is going to be passed in a vector register. + Darwin does the same for variable-argument functions. */ + if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT) + || (cum->stdarg && DEFAULT_ABI != ABI_V4)) + { int align; /* Vector parameters must be 16-byte aligned. This places @@ -4248,7 +4250,32 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, } if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)) - return gen_rtx_REG (mode, cum->vregno); + if (TARGET_64BIT && ! cum->prototype) + { + /* Vector parameters get passed in a vector register + and also in GPRs or memory, in the absence of a prototype. */ + int align_words; + rtx slot; + align_words = (cum->words + 1) & ~1; + + if (align_words >= GP_ARG_NUM_REG) + { + slot = NULL_RTX; + } + else + { + slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words); + } + return gen_rtx_PARALLEL (mode, + gen_rtvec (2, + gen_rtx_EXPR_LIST (VOIDmode, + slot, const0_rtx), + gen_rtx_EXPR_LIST (VOIDmode, + gen_rtx_REG (mode, cum->vregno), + const0_rtx))); + } + else + return gen_rtx_REG (mode, cum->vregno); else if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode)) { if (named || abi == ABI_V4)