about summary refs log tree commit diff
path: root/gcc
diff options
context:
space:
mode:
authorRichard Henderson <rth@redhat.com>2010-07-22 14:40:41 -0700
committerRichard Henderson <rth@gcc.gnu.org>2010-07-22 14:40:41 -0700
commitb78cb618ae222272b2569a31ab9e72e21c6b209a (patch)
tree644104a945f1c15e1f304eb684ac796a416611bf /gcc
parent2e29059874e839158a2e42aa6510ca874568355f (diff)
downloadgcc-b78cb618ae222272b2569a31ab9e72e21c6b209a.zip
gcc-b78cb618ae222272b2569a31ab9e72e21c6b209a.tar.gz
gcc-b78cb618ae222272b2569a31ab9e72e21c6b209a.tar.bz2
re PR target/45027 (FAIL: c-c++-common/dfp/pr36800.c)
PR target/45027
	* config/i386/i386.c (setup_incoming_varargs_64): Force the use
	of V4SFmode for the SSE saves; increase stack alignment if needed.
	(ix86_gimplify_va_arg): Don't increase stack alignment here.

From-SVN: r162429
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog7
-rw-r--r--gcc/config/i386/i386.c21
2 files changed, 17 insertions, 11 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index ac003a3..d2576fb 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,10 @@
+2010-07-22 Richard Henderson <rth@redhat.com>
+
+ PR target/45027
+ * config/i386/i386.c (setup_incoming_varargs_64): Force the use
+ of V4SFmode for the SSE saves; increase stack alignment if needed.
+ (ix86_gimplify_va_arg): Don't increase stack alignment here.
+
2010-07-22 Jakub Jelinek <jakub@redhat.com>
PR bootstrap/45028
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index d9dc571..596a6db 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -7073,7 +7073,7 @@ setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
/* FPR size of varargs save area. We don't need it if we don't pass
anything in SSE registers. */
- if (cum->sse_nregs && cfun->va_list_fpr_size)
+ if (TARGET_SSE && cfun->va_list_fpr_size)
ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
else
ix86_varargs_fpr_size = 0;
@@ -7112,12 +7112,13 @@ setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
label));
- /* If we've determined that we're only loading scalars (and not
- vector data) then we can store doubles instead. */
- if (crtl->stack_alignment_needed < 128)
- smode = DFmode;
- else
- smode = V4SFmode;
+ /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
+ we used movdqa (i.e. TImode) instead? Perhaps even better would
+ be if we could determine the real mode of the data, via a hook
+ into pass_stdarg. Ignore all that for now. */
+ smode = V4SFmode;
+ if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
+ crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);
max = cum->sse_regno + cfun->va_list_fpr_size / 16;
if (max > X86_64_SSE_REGPARM_MAX)
@@ -7549,8 +7550,7 @@ ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
/* Care for on-stack alignment if needed. */
- if (arg_boundary <= 64
- || integer_zerop (TYPE_SIZE (type)))
+ if (arg_boundary <= 64 || size == 0)
t = ovf;
else
{
@@ -7561,9 +7561,8 @@ ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
size_int (-align));
t = fold_convert (TREE_TYPE (ovf), t);
- if (crtl->stack_alignment_needed < arg_boundary)
- crtl->stack_alignment_needed = arg_boundary;
}
+
gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
gimplify_assign (addr, t, pre_p);