author     Andreas Krebbel <krebbel@linux.ibm.com>  2018-08-08 11:35:45 +0000
committer  Andreas Krebbel <krebbel@gcc.gnu.org>    2018-08-08 11:35:45 +0000
commit     0ff3dcd61a70884eb39c2ee9fb107e8ea14849c8 (patch)
tree       5e1f8c3e80e057f30488c4b975fdf55c06937c19
parent     8cc6307c8889b92c98d4b905bf2fdd8fb285a577 (diff)
S/390: Fix whitespace problems in the backend
2018-08-08  Andreas Krebbel  <krebbel@linux.ibm.com>

        * config/s390/s390.c: Fix whitespace damage throughout the file.
        * config/s390/s390.h: Likewise.
        * config/s390/tpf.h: Likewise.

From-SVN: r263395
-rw-r--r--  gcc/ChangeLog             6
-rw-r--r--  gcc/config/s390/s390.c  686
-rw-r--r--  gcc/config/s390/s390.h   88
-rw-r--r--  gcc/config/s390/tpf.h    10
4 files changed, 398 insertions, 392 deletions
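
The "whitespace damage" named in the ChangeLog is indentation written with runs of spaces where GNU coding style expects hard tabs; the web view collapses leading whitespace, which is why most removed and added line pairs in the diff below look identical. As a minimal stand-alone illustration only (this is not the tool used for the commit; GCC's own style checker is contrib/check_GNU_style.sh in the source tree), a sketch that flags such lines could look like this:

    #!/usr/bin/env python3
    # Illustrative sketch, not part of this commit: flag lines whose leading
    # indentation contains a run of eight or more spaces, which GNU style
    # writes as a hard tab.  File names are taken from the command line.
    import sys

    def has_whitespace_damage(line):
        # Leading whitespace is everything before the first non-space/tab char.
        indent = line[: len(line) - len(line.lstrip(" \t"))]
        # Eight consecutive spaces inside the indentation should be a tab.
        return " " * 8 in indent

    def report(path):
        with open(path, encoding="utf-8", errors="replace") as f:
            for lineno, line in enumerate(f, 1):
                if has_whitespace_damage(line):
                    print(f"{path}:{lineno}: spaces used where a tab is expected")

    if __name__ == "__main__":
        for path in sys.argv[1:]:
            report(path)

Run against a file such as gcc/config/s390/s390.c, it prints the file/line pairs whose indentation would need to be rewritten with tabs, which is the kind of cleanup this patch performs by hand across the three files above.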
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index b80246b..1a1d0ee 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2018-08-08 Andreas Krebbel <krebbel@linux.ibm.com>
+
+ * config/s390/s390.c: Fix whitespace damage throughout the file.
+ * config/s390/s390.h: Likewise.
+ * config/s390/tpf.h: Likewise.
+
2018-08-08 Ilya Leoshkevich <iii@linux.ibm.com>
* config/s390/s390.c (s390_loadrelative_operand_p):
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index 9121d83..d533a3f 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -420,11 +420,11 @@ struct s390_address
#define CONST_OK_FOR_K(x) \
CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
- CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
+ CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
- CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
+ CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
- CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
+ CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
#define REGNO_PAIR_OK(REGNO, MODE) \
(s390_hard_regno_nregs ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
@@ -919,7 +919,7 @@ s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
/* Record the vector mode used for an element selector. This assumes:
1. There is no builtin with two different vector modes and an element selector
- 2. The element selector comes after the vector type it is referring to.
+ 2. The element selector comes after the vector type it is referring to.
This currently the true for all the builtins but FIXME we
should better check for that. */
if (VECTOR_MODE_P (insn_op->mode))
@@ -982,7 +982,7 @@ s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
break;
case 1:
if (nonvoid)
- pat = GEN_FCN (icode) (target, op[0]);
+ pat = GEN_FCN (icode) (target, op[0]);
else
pat = GEN_FCN (icode) (op[0]);
break;
@@ -1332,7 +1332,7 @@ s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
case E_CCZmode:
if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
|| m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
- return m2;
+ return m2;
return VOIDmode;
case E_CCSmode:
@@ -1392,20 +1392,20 @@ s390_match_ccmode_set (rtx set, machine_mode req_mode)
case E_CCVFHmode:
case E_CCVFHEmode:
if (req_mode != set_mode)
- return 0;
+ return 0;
break;
case E_CCZmode:
if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
&& req_mode != CCSRmode && req_mode != CCURmode
&& req_mode != CCZ1mode)
- return 0;
+ return 0;
break;
case E_CCAPmode:
case E_CCANmode:
if (req_mode != CCAmode)
- return 0;
+ return 0;
break;
default:
@@ -1434,12 +1434,12 @@ s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
if (GET_CODE (PATTERN (insn)) == PARALLEL)
for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
- {
- rtx set = XVECEXP (PATTERN (insn), 0, i);
- if (GET_CODE (set) == SET)
- if (!s390_match_ccmode_set (set, req_mode))
- return false;
- }
+ {
+ rtx set = XVECEXP (PATTERN (insn), 0, i);
+ if (GET_CODE (set) == SET)
+ if (!s390_match_ccmode_set (set, req_mode))
+ return false;
+ }
return true;
}
@@ -1479,7 +1479,7 @@ s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
bit1 = exact_log2 (INTVAL (op2));
bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
if (bit0 != -1 && bit1 != -1)
- return bit0 > bit1 ? CCT1mode : CCT2mode;
+ return bit0 > bit1 ? CCT1mode : CCT2mode;
}
return VOIDmode;
@@ -1516,7 +1516,7 @@ s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
{
/* Relax CCTmode to CCZmode to allow fall-back to AND
if that turns out to be beneficial. */
- return ccmode == CCTmode ? CCZmode : ccmode;
+ return ccmode == CCTmode ? CCZmode : ccmode;
}
}
@@ -1543,11 +1543,11 @@ s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
return CCAPmode;
- /* If constants are involved in an add instruction it is possible to use
- the resulting cc for comparisons with zero. Knowing the sign of the
+ /* If constants are involved in an add instruction it is possible to use
+ the resulting cc for comparisons with zero. Knowing the sign of the
constant the overflow behavior gets predictable. e.g.:
- int a, b; if ((b = a + c) > 0)
- with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
+ int a, b; if ((b = a + c) > 0)
+ with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
&& (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
|| (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
@@ -1652,8 +1652,8 @@ s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
&& (GET_MODE_SIZE (GET_MODE (inner))
>= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
&& ((INTVAL (mask)
- & GET_MODE_MASK (GET_MODE (inner))
- & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
+ & GET_MODE_MASK (GET_MODE (inner))
+ & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
== 0))
inner = SUBREG_REG (inner);
@@ -1738,8 +1738,8 @@ s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
{
/* For CCRAWmode put the required cc mask into the second
operand. */
- if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
- && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
+ if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
+ && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
*op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
*op0 = XVECEXP (*op0, 0, 0);
*code = new_code;
@@ -1756,7 +1756,7 @@ s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
&& *op1 == const0_rtx)
{
if ((*code == EQ && GET_CODE (*op0) == NE)
- || (*code == NE && GET_CODE (*op0) == EQ))
+ || (*code == NE && GET_CODE (*op0) == EQ))
*code = EQ;
else
*code = NE;
@@ -1899,65 +1899,65 @@ s390_branch_condition_mask (rtx code)
case E_CCZmode:
case E_CCZ1mode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
+ {
+ case EQ: return CC0;
case NE: return CC1 | CC2 | CC3;
default: return -1;
- }
+ }
break;
case E_CCT1mode:
switch (GET_CODE (code))
- {
- case EQ: return CC1;
+ {
+ case EQ: return CC1;
case NE: return CC0 | CC2 | CC3;
default: return -1;
- }
+ }
break;
case E_CCT2mode:
switch (GET_CODE (code))
- {
- case EQ: return CC2;
+ {
+ case EQ: return CC2;
case NE: return CC0 | CC1 | CC3;
default: return -1;
- }
+ }
break;
case E_CCT3mode:
switch (GET_CODE (code))
- {
- case EQ: return CC3;
+ {
+ case EQ: return CC3;
case NE: return CC0 | CC1 | CC2;
default: return -1;
- }
+ }
break;
case E_CCLmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0 | CC2;
+ {
+ case EQ: return CC0 | CC2;
case NE: return CC1 | CC3;
default: return -1;
- }
+ }
break;
case E_CCL1mode:
switch (GET_CODE (code))
- {
+ {
case LTU: return CC2 | CC3; /* carry */
case GEU: return CC0 | CC1; /* no carry */
default: return -1;
- }
+ }
break;
case E_CCL2mode:
switch (GET_CODE (code))
- {
+ {
case GTU: return CC0 | CC1; /* borrow */
case LEU: return CC2 | CC3; /* no borrow */
default: return -1;
- }
+ }
break;
case E_CCL3mode:
@@ -1974,96 +1974,96 @@ s390_branch_condition_mask (rtx code)
case E_CCUmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC1 | CC2 | CC3;
- case LTU: return CC1;
- case GTU: return CC2;
- case LEU: return CC0 | CC1;
- case GEU: return CC0 | CC2;
+ {
+ case EQ: return CC0;
+ case NE: return CC1 | CC2 | CC3;
+ case LTU: return CC1;
+ case GTU: return CC2;
+ case LEU: return CC0 | CC1;
+ case GEU: return CC0 | CC2;
default: return -1;
- }
+ }
break;
case E_CCURmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC2 | CC1 | CC3;
- case LTU: return CC2;
- case GTU: return CC1;
- case LEU: return CC0 | CC2;
- case GEU: return CC0 | CC1;
+ {
+ case EQ: return CC0;
+ case NE: return CC2 | CC1 | CC3;
+ case LTU: return CC2;
+ case GTU: return CC1;
+ case LEU: return CC0 | CC2;
+ case GEU: return CC0 | CC1;
default: return -1;
- }
+ }
break;
case E_CCAPmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC1 | CC2 | CC3;
- case LT: return CC1 | CC3;
- case GT: return CC2;
- case LE: return CC0 | CC1 | CC3;
- case GE: return CC0 | CC2;
+ {
+ case EQ: return CC0;
+ case NE: return CC1 | CC2 | CC3;
+ case LT: return CC1 | CC3;
+ case GT: return CC2;
+ case LE: return CC0 | CC1 | CC3;
+ case GE: return CC0 | CC2;
default: return -1;
- }
+ }
break;
case E_CCANmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC1 | CC2 | CC3;
- case LT: return CC1;
- case GT: return CC2 | CC3;
- case LE: return CC0 | CC1;
- case GE: return CC0 | CC2 | CC3;
+ {
+ case EQ: return CC0;
+ case NE: return CC1 | CC2 | CC3;
+ case LT: return CC1;
+ case GT: return CC2 | CC3;
+ case LE: return CC0 | CC1;
+ case GE: return CC0 | CC2 | CC3;
default: return -1;
- }
+ }
break;
case E_CCSmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC1 | CC2 | CC3;
- case LT: return CC1;
- case GT: return CC2;
- case LE: return CC0 | CC1;
- case GE: return CC0 | CC2;
+ {
+ case EQ: return CC0;
+ case NE: return CC1 | CC2 | CC3;
+ case LT: return CC1;
+ case GT: return CC2;
+ case LE: return CC0 | CC1;
+ case GE: return CC0 | CC2;
case UNORDERED: return CC3;
case ORDERED: return CC0 | CC1 | CC2;
case UNEQ: return CC0 | CC3;
- case UNLT: return CC1 | CC3;
- case UNGT: return CC2 | CC3;
- case UNLE: return CC0 | CC1 | CC3;
- case UNGE: return CC0 | CC2 | CC3;
+ case UNLT: return CC1 | CC3;
+ case UNGT: return CC2 | CC3;
+ case UNLE: return CC0 | CC1 | CC3;
+ case UNGE: return CC0 | CC2 | CC3;
case LTGT: return CC1 | CC2;
default: return -1;
- }
+ }
break;
case E_CCSRmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC2 | CC1 | CC3;
- case LT: return CC2;
- case GT: return CC1;
- case LE: return CC0 | CC2;
- case GE: return CC0 | CC1;
+ {
+ case EQ: return CC0;
+ case NE: return CC2 | CC1 | CC3;
+ case LT: return CC2;
+ case GT: return CC1;
+ case LE: return CC0 | CC2;
+ case GE: return CC0 | CC1;
case UNORDERED: return CC3;
case ORDERED: return CC0 | CC2 | CC1;
case UNEQ: return CC0 | CC3;
- case UNLT: return CC2 | CC3;
- case UNGT: return CC1 | CC3;
- case UNLE: return CC0 | CC2 | CC3;
- case UNGE: return CC0 | CC1 | CC3;
+ case UNLT: return CC2 | CC3;
+ case UNGT: return CC1 | CC3;
+ case UNLE: return CC0 | CC2 | CC3;
+ case UNGE: return CC0 | CC1 | CC3;
case LTGT: return CC2 | CC1;
default: return -1;
- }
+ }
break;
/* Vector comparison modes. */
@@ -2515,7 +2515,7 @@ s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
{
rtx subreg = operand_subword (dst, first_subword, 0, mode);
if (reg_overlap_mentioned_p (subreg, src))
- return false;
+ return false;
}
return true;
@@ -2615,7 +2615,7 @@ s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
else if (REG_P (dst))
dst = gen_rtx_SUBREG (wmode, dst, 0);
else
- dst = gen_reg_rtx (wmode);
+ dst = gen_reg_rtx (wmode);
if (GET_CODE (src1) == SUBREG
&& (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
@@ -2737,7 +2737,7 @@ s390_short_displacement (rtx disp)
if (GET_CODE (disp) == CONST
&& GET_CODE (XEXP (disp, 0)) == UNSPEC
&& (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
- || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
+ || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
return false;
/* All other symbolic constants are literal pool references,
@@ -2895,13 +2895,13 @@ s390_decompose_address (rtx addr, struct s390_address *out)
&& frame_pointer_needed
&& REGNO (base) == HARD_FRAME_POINTER_REGNUM)
|| REGNO (base) == ARG_POINTER_REGNUM
- || (flag_pic
- && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
- pointer = base_ptr = true;
+ || (flag_pic
+ && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
+ pointer = base_ptr = true;
if ((reload_completed || reload_in_progress)
&& base == cfun->machine->base_reg)
- pointer = base_ptr = literal_pool = true;
+ pointer = base_ptr = literal_pool = true;
}
/* Validate index register. */
@@ -2941,13 +2941,13 @@ s390_decompose_address (rtx addr, struct s390_address *out)
&& frame_pointer_needed
&& REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
|| REGNO (indx) == ARG_POINTER_REGNUM
- || (flag_pic
- && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
- pointer = indx_ptr = true;
+ || (flag_pic
+ && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
+ pointer = indx_ptr = true;
if ((reload_completed || reload_in_progress)
&& indx == cfun->machine->base_reg)
- pointer = indx_ptr = literal_pool = true;
+ pointer = indx_ptr = literal_pool = true;
}
/* Prefer to use pointer as base, not index. */
@@ -2991,14 +2991,14 @@ s390_decompose_address (rtx addr, struct s390_address *out)
pointer = true;
/* In the small-PIC case, the linker converts @GOT
- and @GOTNTPOFF offsets to possible displacements. */
+ and @GOTNTPOFF offsets to possible displacements. */
if (GET_CODE (disp) == UNSPEC
- && (XINT (disp, 1) == UNSPEC_GOT
+ && (XINT (disp, 1) == UNSPEC_GOT
|| XINT (disp, 1) == UNSPEC_GOTNTPOFF)
&& flag_pic == 1)
- {
+ {
;
- }
+ }
/* Accept pool label offsets. */
else if (GET_CODE (disp) == UNSPEC
@@ -3008,7 +3008,7 @@ s390_decompose_address (rtx addr, struct s390_address *out)
/* Accept literal pool references. */
else if (GET_CODE (disp) == UNSPEC
&& XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
- {
+ {
/* In case CSE pulled a non literal pool reference out of
the pool we have to reject the address. This is
especially important when loading the GOT pointer on non
@@ -3028,9 +3028,9 @@ s390_decompose_address (rtx addr, struct s390_address *out)
if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
return false;
- orig_disp = plus_constant (Pmode, orig_disp, offset);
+ orig_disp = plus_constant (Pmode, orig_disp, offset);
}
- }
+ }
else
return false;
@@ -3403,7 +3403,7 @@ s390_float_const_zero_p (rtx value)
static int
s390_register_move_cost (machine_mode mode,
- reg_class_t from, reg_class_t to)
+ reg_class_t from, reg_class_t to)
{
/* On s390, copy between fprs and gprs is expensive. */
@@ -3636,14 +3636,14 @@ s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
case UDIV:
case UMOD:
- if (mode == TImode) /* 128 bit division */
+ if (mode == TImode) /* 128 bit division */
*total = s390_cost->dlgr;
else if (mode == DImode)
{
rtx right = XEXP (x, 1);
if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
*total = s390_cost->dlr;
- else /* 64 by 64 bit division */
+ else /* 64 by 64 bit division */
*total = s390_cost->dlgr;
}
else if (mode == SImode) /* 32 bit division */
@@ -3660,7 +3660,7 @@ s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
*total = s390_cost->dsgfr;
else
*total = s390_cost->dr;
- else /* 64 by 64 bit division */
+ else /* 64 by 64 bit division */
*total = s390_cost->dsgr;
}
else if (mode == SImode) /* 32 bit division */
@@ -3941,7 +3941,7 @@ s390_cannot_force_const_mem (machine_mode mode, rtx x)
case SYMBOL_REF:
/* 'Naked' TLS symbol references are never OK,
- non-TLS symbols are OK iff we are non-PIC. */
+ non-TLS symbols are OK iff we are non-PIC. */
if (tls_symbolic_operand (x))
return true;
else
@@ -4117,7 +4117,7 @@ s390_preferred_reload_class (rtx op, reg_class_t rclass)
case CONST:
/* Symrefs cannot be pushed into the literal pool with -fPIC
so we *MUST NOT* return NO_REGS for these cases
- (s390_cannot_force_const_mem will return true).
+ (s390_cannot_force_const_mem will return true).
On the other hand we MUST return NO_REGS for symrefs with
invalid addend which might have been pushed to the literal
@@ -4138,7 +4138,7 @@ s390_preferred_reload_class (rtx op, reg_class_t rclass)
case LABEL_REF:
case SYMBOL_REF:
if (!legitimate_reload_constant_p (op))
- return NO_REGS;
+ return NO_REGS;
/* fallthrough */
case PLUS:
/* load address will be used. */
@@ -4179,7 +4179,7 @@ s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
if (GET_CODE (symref) == SYMBOL_REF)
{
/* We have load-relative instructions for 2-byte, 4-byte, and
- 8-byte alignment so allow only these. */
+ 8-byte alignment so allow only these. */
switch (alignment)
{
case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
@@ -4339,10 +4339,10 @@ s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
case E_##M##mode: \
if (TARGET_64BIT) \
sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
- CODE_FOR_reload##m##di_tomem_z10; \
+ CODE_FOR_reload##m##di_tomem_z10; \
else \
- sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
- CODE_FOR_reload##m##si_tomem_z10; \
+ sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
+ CODE_FOR_reload##m##si_tomem_z10; \
break;
switch (GET_MODE (x))
@@ -4508,7 +4508,7 @@ s390_expand_plus_operand (rtx target, rtx src,
|| (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
{
/* Otherwise, one of the operands cannot be an address register;
- we reload its value into the scratch register. */
+ we reload its value into the scratch register. */
if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
{
emit_move_insn (scratch, sum1);
@@ -4521,9 +4521,9 @@ s390_expand_plus_operand (rtx target, rtx src,
}
/* According to the way these invalid addresses are generated
- in reload.c, it should never happen (at least on s390) that
- *neither* of the PLUS components, after find_replacements
- was applied, is an address register. */
+ in reload.c, it should never happen (at least on s390) that
+ *neither* of the PLUS components, after find_replacements
+ was applied, is an address register. */
if (sum1 == scratch && sum2 == scratch)
{
debug_rtx (src);
@@ -4786,7 +4786,7 @@ legitimize_pic_address (rtx orig, rtx reg)
that case. So no need to do it here. */
if (reg == 0)
- reg = gen_reg_rtx (Pmode);
+ reg = gen_reg_rtx (Pmode);
if (TARGET_Z10)
{
@@ -4800,19 +4800,19 @@ legitimize_pic_address (rtx orig, rtx reg)
new_rtx = reg;
}
else if (flag_pic == 1)
- {
- /* Assume GOT offset is a valid displacement operand (< 4k
- or < 512k with z990). This is handled the same way in
- both 31- and 64-bit code (@GOT).
- lg <target>, sym@GOT(r12) */
+ {
+ /* Assume GOT offset is a valid displacement operand (< 4k
+ or < 512k with z990). This is handled the same way in
+ both 31- and 64-bit code (@GOT).
+ lg <target>, sym@GOT(r12) */
if (reload_in_progress || reload_completed)
df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
- new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
- new_rtx = gen_rtx_CONST (Pmode, new_rtx);
- new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
- new_rtx = gen_const_mem (Pmode, new_rtx);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
+ new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
+ new_rtx = gen_const_mem (Pmode, new_rtx);
emit_move_insn (reg, new_rtx);
new_rtx = reg;
}
@@ -4821,15 +4821,15 @@ legitimize_pic_address (rtx orig, rtx reg)
/* If the GOT offset might be >= 4k, we determine the position
of the GOT entry via a PC-relative LARL (@GOTENT).
larl temp, sym@GOTENT
- lg <target>, 0(temp) */
+ lg <target>, 0(temp) */
- rtx temp = reg ? reg : gen_reg_rtx (Pmode);
+ rtx temp = reg ? reg : gen_reg_rtx (Pmode);
gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
|| REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
- new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
- new_rtx = gen_rtx_CONST (Pmode, new_rtx);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
emit_move_insn (temp, new_rtx);
new_rtx = gen_const_mem (Pmode, temp);
emit_move_insn (reg, new_rtx);
@@ -5051,7 +5051,7 @@ legitimize_tls_address (rtx addr, rtx reg)
new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
new_rtx = gen_rtx_CONST (Pmode, new_rtx);
new_rtx = force_const_mem (Pmode, new_rtx);
- temp = gen_reg_rtx (Pmode);
+ temp = gen_reg_rtx (Pmode);
emit_move_insn (temp, new_rtx);
new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
@@ -5149,9 +5149,9 @@ s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
else if (flag_pic)
{
if (SYMBOLIC_CONST (x)
- || (GET_CODE (x) == PLUS
- && (SYMBOLIC_CONST (XEXP (x, 0))
- || SYMBOLIC_CONST (XEXP (x, 1)))))
+ || (GET_CODE (x) == PLUS
+ && (SYMBOLIC_CONST (XEXP (x, 0))
+ || SYMBOLIC_CONST (XEXP (x, 1)))))
x = legitimize_pic_address (x, 0);
if (s390_legitimate_address_p (mode, x, FALSE))
@@ -5308,7 +5308,7 @@ s390_expand_movmem (rtx dst, rtx src, rtx len)
mode = GET_MODE (len);
if (mode == VOIDmode)
- mode = Pmode;
+ mode = Pmode;
dst_addr = gen_reg_rtx (Pmode);
src_addr = gen_reg_rtx (Pmode);
@@ -5327,12 +5327,12 @@ s390_expand_movmem (rtx dst, rtx src, rtx len)
temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
OPTAB_DIRECT);
if (temp != count)
- emit_move_insn (count, temp);
+ emit_move_insn (count, temp);
temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
OPTAB_DIRECT);
if (temp != blocks)
- emit_move_insn (blocks, temp);
+ emit_move_insn (blocks, temp);
emit_cmp_and_jump_insns (blocks, const0_rtx,
EQ, NULL_RTX, mode, 1, loop_end_label);
@@ -5366,7 +5366,7 @@ s390_expand_movmem (rtx dst, rtx src, rtx len)
temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
OPTAB_DIRECT);
if (temp != blocks)
- emit_move_insn (blocks, temp);
+ emit_move_insn (blocks, temp);
emit_cmp_and_jump_insns (blocks, const0_rtx,
EQ, NULL_RTX, mode, 1, loop_end_label);
@@ -5588,12 +5588,12 @@ s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
{
if (INTVAL (len) > 0)
- {
- emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
- emit_insn (gen_cmpint (target, ccreg));
- }
+ {
+ emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
+ emit_insn (gen_cmpint (target, ccreg));
+ }
else
- emit_move_insn (target, const0_rtx);
+ emit_move_insn (target, const0_rtx);
}
else if (TARGET_MVCLE)
{
@@ -5610,7 +5610,7 @@ s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
mode = GET_MODE (len);
if (mode == VOIDmode)
- mode = Pmode;
+ mode = Pmode;
addr0 = gen_reg_rtx (Pmode);
addr1 = gen_reg_rtx (Pmode);
@@ -5629,12 +5629,12 @@ s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
OPTAB_DIRECT);
if (temp != count)
- emit_move_insn (count, temp);
+ emit_move_insn (count, temp);
temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
OPTAB_DIRECT);
if (temp != blocks)
- emit_move_insn (blocks, temp);
+ emit_move_insn (blocks, temp);
emit_cmp_and_jump_insns (blocks, const0_rtx,
EQ, NULL_RTX, mode, 1, loop_end_label);
@@ -5674,7 +5674,7 @@ s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
OPTAB_DIRECT);
if (temp != blocks)
- emit_move_insn (blocks, temp);
+ emit_move_insn (blocks, temp);
emit_cmp_and_jump_insns (blocks, const0_rtx,
EQ, NULL_RTX, mode, 1, loop_end_label);
@@ -5810,7 +5810,7 @@ s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
highest_index_to_load_reg, str_idx_reg));
add_reg_br_prob_note (s390_emit_jump (is_aligned_label, cond),
- profile_probability::very_unlikely ());
+ profile_probability::very_unlikely ());
expand_binop (Pmode, add_optab, str_idx_reg,
GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
@@ -6044,7 +6044,7 @@ s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
p = rtvec_alloc (2);
RTVEC_ELT (p, 0) =
- gen_rtx_SET (dst, op_res);
+ gen_rtx_SET (dst, op_res);
RTVEC_ELT (p, 1) =
gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
@@ -6111,7 +6111,7 @@ s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
const0_rtx));
p = rtvec_alloc (2);
RTVEC_ELT (p, 0) =
- gen_rtx_SET (dst, op_res);
+ gen_rtx_SET (dst, op_res);
RTVEC_ELT (p, 1) =
gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
@@ -6942,8 +6942,8 @@ s390_expand_cs_tdsi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, output, cmp)));
}
s390_emit_jump (skip_cs_label, gen_rtx_NE (VOIDmode, cc, const0_rtx));
- add_reg_br_prob_note (get_last_insn (),
- profile_probability::very_unlikely ());
+ add_reg_br_prob_note (get_last_insn (),
+ profile_probability::very_unlikely ());
/* If the jump is not taken, OUTPUT is the expected value. */
cmp = output;
/* Reload newval to a register manually, *after* the compare and jump
@@ -7210,12 +7210,12 @@ s390_delegitimize_address (rtx orig_x)
/* Extract the symbol ref from:
(plus:SI (reg:SI 12 %r12)
- (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
- UNSPEC_GOTOFF/PLTOFF)))
+ (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
+ UNSPEC_GOTOFF/PLTOFF)))
and
(plus:SI (reg:SI 12 %r12)
- (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
- UNSPEC_GOTOFF/PLTOFF)
+ (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
+ UNSPEC_GOTOFF/PLTOFF)
(const_int 4 [0x4])))) */
if (GET_CODE (x) == PLUS
&& REG_P (XEXP (x, 0))
@@ -7260,7 +7260,7 @@ s390_delegitimize_address (rtx orig_x)
{
/* Extract the symbol ref from:
(mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
- UNSPEC_PLT/GOTENT))) */
+ UNSPEC_PLT/GOTENT))) */
y = XEXP (x, 0);
if (GET_CODE (y) == UNSPEC
@@ -7575,7 +7575,7 @@ print_operand_address (FILE *file, rtx addr)
if (ad.base && ad.indx)
fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
- reg_names[REGNO (ad.base)]);
+ reg_names[REGNO (ad.base)]);
else if (ad.base)
fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
}
@@ -7609,7 +7609,7 @@ print_operand_address (FILE *file, rtx addr)
'o': print integer X as if it's an unsigned 32bit word.
's': "start" of contiguous bitmask X in either DImode or vector inner mode.
't': CONST_INT: "start" of contiguous bitmask X in SImode.
- CONST_VECTOR: Generate a bitmask for vgbm instruction.
+ CONST_VECTOR: Generate a bitmask for vgbm instruction.
'x': print integer X as if it's an unsigned halfword.
'v': print register number as vector register (v1 instead of f1).
*/
@@ -7667,7 +7667,7 @@ print_operand (FILE *file, rtx x, int code)
case 'O':
{
- struct s390_address ad;
+ struct s390_address ad;
int ret;
ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
@@ -7680,16 +7680,16 @@ print_operand (FILE *file, rtx x, int code)
return;
}
- if (ad.disp)
- output_addr_const (file, ad.disp);
- else
- fprintf (file, "0");
+ if (ad.disp)
+ output_addr_const (file, ad.disp);
+ else
+ fprintf (file, "0");
}
return;
case 'R':
{
- struct s390_address ad;
+ struct s390_address ad;
int ret;
ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
@@ -7702,10 +7702,10 @@ print_operand (FILE *file, rtx x, int code)
return;
}
- if (ad.base)
- fprintf (file, "%s", reg_names[REGNO (ad.base)]);
- else
- fprintf (file, "0");
+ if (ad.base)
+ fprintf (file, "%s", reg_names[REGNO (ad.base)]);
+ else
+ fprintf (file, "0");
}
return;
@@ -7851,13 +7851,13 @@ print_operand (FILE *file, rtx x, int code)
case CONST_WIDE_INT:
if (code == 'b')
- fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
CONST_WIDE_INT_ELT (x, 0) & 0xff);
else if (code == 'x')
- fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
CONST_WIDE_INT_ELT (x, 0) & 0xffff);
else if (code == 'h')
- fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
else
{
@@ -8060,7 +8060,7 @@ s390_adjust_priority (rtx_insn *insn, int priority)
priority = priority << 1;
break;
default:
- break;
+ break;
}
return priority;
}
@@ -8191,14 +8191,14 @@ annotate_constant_pool_refs (rtx *x)
for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- {
- annotate_constant_pool_refs (&XEXP (*x, i));
- }
+ {
+ annotate_constant_pool_refs (&XEXP (*x, i));
+ }
else if (fmt[i] == 'E')
- {
- for (j = 0; j < XVECLEN (*x, i); j++)
- annotate_constant_pool_refs (&XVECEXP (*x, i, j));
- }
+ {
+ for (j = 0; j < XVECLEN (*x, i); j++)
+ annotate_constant_pool_refs (&XVECEXP (*x, i, j));
+ }
}
}
@@ -8226,13 +8226,13 @@ find_constant_pool_ref (rtx x, rtx *ref)
return;
gcc_assert (GET_CODE (x) != SYMBOL_REF
- || !CONSTANT_POOL_ADDRESS_P (x));
+ || !CONSTANT_POOL_ADDRESS_P (x));
if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
{
rtx sym = XVECEXP (x, 0, 0);
gcc_assert (GET_CODE (sym) == SYMBOL_REF
- && CONSTANT_POOL_ADDRESS_P (sym));
+ && CONSTANT_POOL_ADDRESS_P (sym));
if (*ref == NULL_RTX)
*ref = sym;
@@ -8246,14 +8246,14 @@ find_constant_pool_ref (rtx x, rtx *ref)
for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- {
- find_constant_pool_ref (XEXP (x, i), ref);
- }
+ {
+ find_constant_pool_ref (XEXP (x, i), ref);
+ }
else if (fmt[i] == 'E')
- {
- for (j = 0; j < XVECLEN (x, i); j++)
- find_constant_pool_ref (XVECEXP (x, i, j), ref);
- }
+ {
+ for (j = 0; j < XVECLEN (x, i); j++)
+ find_constant_pool_ref (XVECEXP (x, i, j), ref);
+ }
}
}
@@ -8291,14 +8291,14 @@ replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- {
- replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
- }
+ {
+ replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
+ }
else if (fmt[i] == 'E')
- {
- for (j = 0; j < XVECLEN (*x, i); j++)
- replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
- }
+ {
+ for (j = 0; j < XVECLEN (*x, i); j++)
+ replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
+ }
}
}
@@ -8319,20 +8319,20 @@ find_ltrel_base (rtx x)
for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- {
- rtx fnd = find_ltrel_base (XEXP (x, i));
+ {
+ rtx fnd = find_ltrel_base (XEXP (x, i));
if (fnd)
return fnd;
- }
+ }
else if (fmt[i] == 'E')
- {
- for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ for (j = 0; j < XVECLEN (x, i); j++)
{
- rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
+ rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
if (fnd)
return fnd;
}
- }
+ }
}
return NULL_RTX;
@@ -8357,14 +8357,14 @@ replace_ltrel_base (rtx *x)
for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- {
- replace_ltrel_base (&XEXP (*x, i));
- }
+ {
+ replace_ltrel_base (&XEXP (*x, i));
+ }
else if (fmt[i] == 'E')
- {
- for (j = 0; j < XVECLEN (*x, i); j++)
- replace_ltrel_base (&XVECEXP (*x, i, j));
- }
+ {
+ for (j = 0; j < XVECLEN (*x, i); j++)
+ replace_ltrel_base (&XVECEXP (*x, i, j));
+ }
}
}
@@ -8857,21 +8857,21 @@ s390_mainpool_finish (struct constant_pool *pool)
replace_ltrel_base (&PATTERN (insn));
if (NONJUMP_INSN_P (insn) || CALL_P (insn))
- {
- rtx addr, pool_ref = NULL_RTX;
- find_constant_pool_ref (PATTERN (insn), &pool_ref);
- if (pool_ref)
- {
+ {
+ rtx addr, pool_ref = NULL_RTX;
+ find_constant_pool_ref (PATTERN (insn), &pool_ref);
+ if (pool_ref)
+ {
if (s390_execute_label (insn))
addr = s390_find_execute (pool, insn);
else
addr = s390_find_constant (pool, get_pool_constant (pool_ref),
get_pool_mode (pool_ref));
- replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
- INSN_CODE (insn) = -1;
- }
- }
+ replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
+ INSN_CODE (insn) = -1;
+ }
+ }
}
@@ -9039,7 +9039,7 @@ s390_chunkify_start (void)
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (LABEL_P (insn)
- && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
+ && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
{
struct constant_pool *pool = s390_find_pool (pool_list, insn);
if (pool)
@@ -9085,11 +9085,11 @@ s390_chunkify_finish (struct constant_pool *pool_list)
continue;
if (NONJUMP_INSN_P (insn) || CALL_P (insn))
- {
- rtx addr, pool_ref = NULL_RTX;
- find_constant_pool_ref (PATTERN (insn), &pool_ref);
- if (pool_ref)
- {
+ {
+ rtx addr, pool_ref = NULL_RTX;
+ find_constant_pool_ref (PATTERN (insn), &pool_ref);
+ if (pool_ref)
+ {
if (s390_execute_label (insn))
addr = s390_find_execute (curr_pool, insn);
else
@@ -9097,10 +9097,10 @@ s390_chunkify_finish (struct constant_pool *pool_list)
get_pool_constant (pool_ref),
get_pool_mode (pool_ref));
- replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
- INSN_CODE (insn) = -1;
- }
- }
+ replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
+ INSN_CODE (insn) = -1;
+ }
+ }
}
/* Dump out all literal pools. */
@@ -10416,9 +10416,9 @@ save_gprs (rtx base, int offset, int first, int last)
if (first == last)
{
if (TARGET_64BIT)
- insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
+ insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
else
- insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
+ insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
if (!global_not_special_regno_p (first))
RTX_FRAME_RELATED_P (insn) = 1;
@@ -10529,9 +10529,9 @@ restore_gprs (rtx base, int offset, int first, int last)
if (first == last)
{
if (TARGET_64BIT)
- insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
+ insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
else
- insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
+ insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
RTX_FRAME_RELATED_P (insn) = 1;
return insn;
@@ -11067,7 +11067,7 @@ s390_emit_prologue (void)
t, const0_rtx, const0_rtx));
}
}
- }
+ }
if (s390_warn_framesize > 0
&& cfun_frame_layout.frame_size >= s390_warn_framesize)
@@ -11215,7 +11215,7 @@ s390_emit_epilogue (bool sibcall)
algorithms located at the branch target. */
/* Emit a blockage here so that all code
- lies between the profiling mechanisms. */
+ lies between the profiling mechanisms. */
emit_insn (gen_blockage ());
emit_insn (gen_epilogue_tpf ());
@@ -11236,7 +11236,7 @@ s390_emit_epilogue (bool sibcall)
/* Nothing to restore. */
}
else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
- && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
+ && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
{
/* Area is in range. */
offset = cfun_frame_layout.frame_size;
@@ -11534,8 +11534,8 @@ s390_expand_split_stack_prologue (void)
LABEL_NUSES (call_done)++;
/* Mark the jump as very unlikely to be taken. */
- add_reg_br_prob_note (insn,
- profile_probability::very_unlikely ());
+ add_reg_br_prob_note (insn,
+ profile_probability::very_unlikely ());
if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
{
@@ -11803,7 +11803,7 @@ s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
if (type)
{
if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
- return true;
+ return true;
if (TREE_CODE (type) == COMPLEX_TYPE
|| TREE_CODE (type) == VECTOR_TYPE)
@@ -11976,9 +11976,9 @@ s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
static machine_mode
s390_promote_function_mode (const_tree type, machine_mode mode,
- int *punsignedp,
- const_tree fntype ATTRIBUTE_UNUSED,
- int for_return ATTRIBUTE_UNUSED)
+ int *punsignedp,
+ const_tree fntype ATTRIBUTE_UNUSED,
+ int for_return ATTRIBUTE_UNUSED)
{
if (INTEGRAL_MODE_P (mode)
&& GET_MODE_SIZE (mode) < UNITS_PER_LONG)
@@ -12073,12 +12073,12 @@ s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
On S/390, va_list is an array type equivalent to
typedef struct __va_list_tag
- {
- long __gpr;
- long __fpr;
- void *__overflow_arg_area;
- void *__reg_save_area;
- } va_list[1];
+ {
+ long __gpr;
+ long __fpr;
+ void *__overflow_arg_area;
+ void *__reg_save_area;
+ } va_list[1];
where __gpr and __fpr hold the number of general purpose
or floating point arguments used up to now, respectively,
@@ -12182,14 +12182,14 @@ s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
if (cfun->va_list_fpr_size)
{
t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
- build_int_cst (NULL_TREE, n_fpr));
+ build_int_cst (NULL_TREE, n_fpr));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
if (flag_split_stack
&& (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
- == NULL)
+ == NULL)
&& cfun->machine->split_stack_varargs_pointer == NULL_RTX)
{
rtx reg;
@@ -12217,9 +12217,9 @@ s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
|| TARGET_VX_ABI)
{
if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
- t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
+ t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
else
- t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
+ t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
off = INTVAL (crtl->args.arg_offset_rtx);
off = off < 0 ? 0 : off;
@@ -12255,7 +12255,7 @@ s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
if (integral value) {
if (size <= 4 && args.gpr < 5 ||
- size > 4 && args.gpr < 4 )
+ size > 4 && args.gpr < 4 )
ret = args.reg_save_area[args.gpr+8]
else
ret = *args.overflow_arg_area++;
@@ -12391,13 +12391,13 @@ s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
{
/*
if (reg > ((typeof (reg))max_reg))
- goto lab_false;
+ goto lab_false;
- addr = sav + sav_ofs + reg * save_scale;
+ addr = sav + sav_ofs + reg * save_scale;
goto lab_over;
- lab_false:
+ lab_false:
*/
lab_false = create_artificial_label (UNKNOWN_LOCATION);
@@ -12471,11 +12471,11 @@ s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
expanders.
DEST - Register location where CC will be stored.
TDB - Pointer to a 256 byte area where to store the transaction.
- diagnostic block. NULL if TDB is not needed.
+ diagnostic block. NULL if TDB is not needed.
RETRY - Retry count value. If non-NULL a retry loop for CC2
- is emitted
+ is emitted
CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
- of the tbegin instruction pattern. */
+ of the tbegin instruction pattern. */
void
s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
@@ -12619,17 +12619,17 @@ output_asm_nops (const char *user, int hw)
{
output_asm_insn ("brcl\t0,0", NULL);
hw -= 3;
- }
+ }
else if (hw >= 2)
- {
- output_asm_insn ("bc\t0,0", NULL);
- hw -= 2;
- }
+ {
+ output_asm_insn ("bc\t0,0", NULL);
+ hw -= 2;
+ }
else
- {
- output_asm_insn ("bcr\t0,0", NULL);
- hw -= 1;
- }
+ {
+ output_asm_insn ("bcr\t0,0", NULL);
+ hw -= 1;
+ }
}
}
@@ -12668,44 +12668,44 @@ s390_function_profiler (FILE *file, int labelno)
if (flag_fentry)
{
if (flag_nop_mcount)
- output_asm_nops ("-mnop-mcount", /* brasl */ 3);
+ output_asm_nops ("-mnop-mcount", /* brasl */ 3);
else if (cfun->static_chain_decl)
- warning (OPT_Wcannot_profile, "nested functions cannot be profiled "
- "with -mfentry on s390");
+ warning (OPT_Wcannot_profile, "nested functions cannot be profiled "
+ "with -mfentry on s390");
else
- output_asm_insn ("brasl\t0,%4", op);
+ output_asm_insn ("brasl\t0,%4", op);
}
else if (TARGET_64BIT)
{
if (flag_nop_mcount)
- output_asm_nops ("-mnop-mcount", /* stg */ 3 + /* larl */ 3 +
- /* brasl */ 3 + /* lg */ 3);
+ output_asm_nops ("-mnop-mcount", /* stg */ 3 + /* larl */ 3 +
+ /* brasl */ 3 + /* lg */ 3);
else
- {
- output_asm_insn ("stg\t%0,%1", op);
- if (flag_dwarf2_cfi_asm)
- output_asm_insn (".cfi_rel_offset\t%0,%7", op);
- output_asm_insn ("larl\t%2,%3", op);
- output_asm_insn ("brasl\t%0,%4", op);
- output_asm_insn ("lg\t%0,%1", op);
- if (flag_dwarf2_cfi_asm)
- output_asm_insn (".cfi_restore\t%0", op);
- }
+ {
+ output_asm_insn ("stg\t%0,%1", op);
+ if (flag_dwarf2_cfi_asm)
+ output_asm_insn (".cfi_rel_offset\t%0,%7", op);
+ output_asm_insn ("larl\t%2,%3", op);
+ output_asm_insn ("brasl\t%0,%4", op);
+ output_asm_insn ("lg\t%0,%1", op);
+ if (flag_dwarf2_cfi_asm)
+ output_asm_insn (".cfi_restore\t%0", op);
+ }
}
else
{
if (flag_nop_mcount)
- output_asm_nops ("-mnop-mcount", /* st */ 2 + /* larl */ 3 +
- /* brasl */ 3 + /* l */ 2);
+ output_asm_nops ("-mnop-mcount", /* st */ 2 + /* larl */ 3 +
+ /* brasl */ 3 + /* l */ 2);
else
- {
- output_asm_insn ("st\t%0,%1", op);
- if (flag_dwarf2_cfi_asm)
- output_asm_insn (".cfi_rel_offset\t%0,%7", op);
- output_asm_insn ("larl\t%2,%3", op);
- output_asm_insn ("brasl\t%0,%4", op);
- output_asm_insn ("l\t%0,%1", op);
- if (flag_dwarf2_cfi_asm)
+ {
+ output_asm_insn ("st\t%0,%1", op);
+ if (flag_dwarf2_cfi_asm)
+ output_asm_insn (".cfi_rel_offset\t%0,%7", op);
+ output_asm_insn ("larl\t%2,%3", op);
+ output_asm_insn ("brasl\t%0,%4", op);
+ output_asm_insn ("l\t%0,%1", op);
+ if (flag_dwarf2_cfi_asm)
output_asm_insn (".cfi_restore\t%0", op);
}
}
@@ -12830,8 +12830,8 @@ s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
output_asm_insn ("lay\t%1,%2(%1)", op);
else if (CONST_OK_FOR_K (delta))
output_asm_insn ("aghi\t%1,%2", op);
- else if (CONST_OK_FOR_Os (delta))
- output_asm_insn ("agfi\t%1,%2", op);
+ else if (CONST_OK_FOR_Os (delta))
+ output_asm_insn ("agfi\t%1,%2", op);
else
{
op[6] = gen_label_rtx ();
@@ -12853,12 +12853,12 @@ s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
output_asm_insn ("ag\t%4,0(%1)", op);
output_asm_insn ("ag\t%1,0(%4)", op);
}
- else if (CONST_OK_FOR_Os (vcall_offset))
- {
- output_asm_insn ("lgfi\t%4,%3", op);
- output_asm_insn ("ag\t%4,0(%1)", op);
- output_asm_insn ("ag\t%1,0(%4)", op);
- }
+ else if (CONST_OK_FOR_Os (vcall_offset))
+ {
+ output_asm_insn ("lgfi\t%4,%3", op);
+ output_asm_insn ("ag\t%4,0(%1)", op);
+ output_asm_insn ("ag\t%1,0(%4)", op);
+ }
else
{
op[7] = gen_label_rtx ();
@@ -12896,10 +12896,10 @@ s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
/* Setup base pointer if required. */
if (!vcall_offset
|| (!DISP_IN_RANGE (delta)
- && !CONST_OK_FOR_K (delta)
+ && !CONST_OK_FOR_K (delta)
&& !CONST_OK_FOR_Os (delta))
|| (!DISP_IN_RANGE (delta)
- && !CONST_OK_FOR_K (vcall_offset)
+ && !CONST_OK_FOR_K (vcall_offset)
&& !CONST_OK_FOR_Os (vcall_offset)))
{
op[5] = gen_label_rtx ();
@@ -12918,7 +12918,7 @@ s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
else if (CONST_OK_FOR_K (delta))
output_asm_insn ("ahi\t%1,%2", op);
else if (CONST_OK_FOR_Os (delta))
- output_asm_insn ("afi\t%1,%2", op);
+ output_asm_insn ("afi\t%1,%2", op);
else
{
op[6] = gen_label_rtx ();
@@ -12928,7 +12928,7 @@ s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
/* Perform vcall adjustment. */
if (vcall_offset)
- {
+ {
if (CONST_OK_FOR_J (vcall_offset))
{
output_asm_insn ("l\t%4,0(%1)", op);
@@ -12946,11 +12946,11 @@ s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
output_asm_insn ("a\t%1,0(%4)", op);
}
else if (CONST_OK_FOR_Os (vcall_offset))
- {
- output_asm_insn ("iilf\t%4,%3", op);
- output_asm_insn ("a\t%4,0(%1)", op);
- output_asm_insn ("a\t%1,0(%4)", op);
- }
+ {
+ output_asm_insn ("iilf\t%4,%3", op);
+ output_asm_insn ("a\t%4,0(%1)", op);
+ output_asm_insn ("a\t%1,0(%4)", op);
+ }
else
{
op[7] = gen_label_rtx ();
@@ -13175,10 +13175,10 @@ s390_call_saved_register_used (tree call_expr)
named. This only has an impact on vector argument register
usage none of which is call-saved. */
if (pass_by_reference (&cum_v, mode, type, true))
- {
- mode = Pmode;
- type = build_pointer_type (type);
- }
+ {
+ mode = Pmode;
+ type = build_pointer_type (type);
+ }
parm_rtx = s390_function_arg (cum, mode, type, true);
@@ -13188,10 +13188,10 @@ s390_call_saved_register_used (tree call_expr)
continue;
if (REG_P (parm_rtx))
- {
+ {
for (reg = 0; reg < REG_NREGS (parm_rtx); reg++)
if (!call_used_regs[reg + REGNO (parm_rtx)])
- return true;
+ return true;
}
if (GET_CODE (parm_rtx) == PARALLEL)
@@ -13263,8 +13263,8 @@ s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
TLS_CALL the location of the thread-local symbol
RESULT_REG the register where the result of the call should be stored
RETADDR_REG the register where the return address should be stored
- If this parameter is NULL_RTX the call is considered
- to be a sibling call. */
+ If this parameter is NULL_RTX the call is considered
+ to be a sibling call. */
rtx_insn *
s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
@@ -13284,9 +13284,9 @@ s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
if (GET_CODE (addr_location) == SYMBOL_REF)
{
/* When calling a global routine in PIC mode, we must
- replace the symbol itself with the PLT stub. */
+ replace the symbol itself with the PLT stub. */
if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
- {
+ {
if (TARGET_64BIT || retaddr_reg != NULL_RTX)
{
addr_location = gen_rtx_UNSPEC (Pmode,
@@ -13443,7 +13443,7 @@ s390_emit_tpf_eh_return (rtx target)
emit_move_insn (reg, target);
emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
- gen_rtx_REG (Pmode, RETURN_REGNUM));
+ gen_rtx_REG (Pmode, RETURN_REGNUM));
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
@@ -15029,8 +15029,8 @@ s390_option_override_internal (struct gcc_options *opts,
/* Use the alternative scheduling-pressure algorithm by default. */
maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
- opts->x_param_values,
- opts_set->x_param_values);
+ opts->x_param_values,
+ opts_set->x_param_values);
maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
opts->x_param_values,
@@ -15126,7 +15126,7 @@ s390_option_override (void)
{
/* Don't emit DWARF3/4 unless specifically selected. The TPF
debuggers do not yet support DWARF 3/4. */
- if (!global_options_set.x_dwarf_strict)
+ if (!global_options_set.x_dwarf_strict)
dwarf_strict = 1;
if (!global_options_set.x_dwarf_version)
dwarf_version = 2;
diff --git a/gcc/config/s390/s390.h b/gcc/config/s390/s390.h
index d0ccc49..4fb32b8 100644
--- a/gcc/config/s390/s390.h
+++ b/gcc/config/s390/s390.h
@@ -1,8 +1,8 @@
/* Definitions of target machine for GNU compiler, for IBM S/390
Copyright (C) 1999-2018 Free Software Foundation, Inc.
Contributed by Hartmut Penner (hpenner@de.ibm.com) and
- Ulrich Weigand (uweigand@de.ibm.com).
- Andreas Krebbel (Andreas.Krebbel@de.ibm.com)
+ Ulrich Weigand (uweigand@de.ibm.com).
+ Andreas Krebbel (Andreas.Krebbel@de.ibm.com)
This file is part of GCC.
@@ -202,13 +202,13 @@ enum processor_flags
#ifdef DEFAULT_TARGET_64BIT
#define TARGET_DEFAULT (MASK_64BIT | MASK_ZARCH | MASK_HARD_DFP \
- | MASK_OPT_HTM | MASK_OPT_VX)
+ | MASK_OPT_HTM | MASK_OPT_VX)
#else
#define TARGET_DEFAULT 0
#endif
/* Support for configure-time defaults. */
-#define OPTION_DEFAULT_SPECS \
+#define OPTION_DEFAULT_SPECS \
{ "mode", "%{!mesa:%{!mzarch:-m%(VALUE)}}" }, \
{ "arch", "%{!march=*:-march=%(VALUE)}" }, \
{ "tune", "%{!mtune=*:%{!march=*:-mtune=%(VALUE)}}" }
@@ -261,10 +261,10 @@ extern const char *s390_host_detect_local_cpu (int argc, const char **argv);
/* For signbit, the BFP-DFP-difference makes no difference. */
#define S390_TDC_SIGNBIT_SET (S390_TDC_NEGATIVE_ZERO \
- | S390_TDC_NEGATIVE_NORMALIZED_BFP_NUMBER \
- | S390_TDC_NEGATIVE_DENORMALIZED_BFP_NUMBER\
- | S390_TDC_NEGATIVE_INFINITY \
- | S390_TDC_NEGATIVE_QUIET_NAN \
+ | S390_TDC_NEGATIVE_NORMALIZED_BFP_NUMBER \
+ | S390_TDC_NEGATIVE_DENORMALIZED_BFP_NUMBER\
+ | S390_TDC_NEGATIVE_INFINITY \
+ | S390_TDC_NEGATIVE_QUIET_NAN \
| S390_TDC_NEGATIVE_SIGNALING_NAN )
#define S390_TDC_INFINITY (S390_TDC_POSITIVE_INFINITY \
@@ -418,51 +418,51 @@ extern const char *s390_host_detect_local_cpu (int argc, const char **argv);
All non-FP vector registers are call-clobbered v16-v31. */
#define FIXED_REGISTERS \
-{ 0, 0, 0, 0, \
- 0, 0, 0, 0, \
- 0, 0, 0, 0, \
+{ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
0, 1, 1, 1, \
- 0, 0, 0, 0, \
- 0, 0, 0, 0, \
- 0, 0, 0, 0, \
- 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
1, 1, 1, 1, \
1, 1, \
- 0, 0, 0, 0, \
- 0, 0, 0, 0, \
- 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
0, 0, 0, 0 }
#define CALL_USED_REGISTERS \
-{ 1, 1, 1, 1, \
- 1, 1, 0, 0, \
- 0, 0, 0, 0, \
+{ 1, 1, 1, 1, \
+ 1, 1, 0, 0, \
+ 0, 0, 0, 0, \
0, 1, 1, 1, \
- 1, 1, 1, 1, \
- 1, 1, 1, 1, \
- 1, 1, 1, 1, \
- 1, 1, 1, 1, \
1, 1, 1, 1, \
- 1, 1, \
- 1, 1, 1, 1, \
1, 1, 1, 1, \
- 1, 1, 1, 1, \
+ 1, 1, 1, 1, \
+ 1, 1, 1, 1, \
+ 1, 1, 1, 1, \
+ 1, 1, \
+ 1, 1, 1, 1, \
+ 1, 1, 1, 1, \
+ 1, 1, 1, 1, \
1, 1, 1, 1 }
#define CALL_REALLY_USED_REGISTERS \
-{ 1, 1, 1, 1, /* r0 - r15 */ \
- 1, 1, 0, 0, \
- 0, 0, 0, 0, \
+{ 1, 1, 1, 1, /* r0 - r15 */ \
+ 1, 1, 0, 0, \
+ 0, 0, 0, 0, \
0, 0, 0, 0, \
- 1, 1, 1, 1, /* f0 (16) - f15 (31) */ \
- 1, 1, 1, 1, \
- 1, 1, 1, 1, \
- 1, 1, 1, 1, \
+ 1, 1, 1, 1, /* f0 (16) - f15 (31) */ \
+ 1, 1, 1, 1, \
+ 1, 1, 1, 1, \
+ 1, 1, 1, 1, \
1, 1, 1, 1, /* arg, cc, fp, ret addr */ \
- 0, 0, /* a0 (36), a1 (37) */ \
- 1, 1, 1, 1, /* v16 (38) - v23 (45) */ \
+ 0, 0, /* a0 (36), a1 (37) */ \
+ 1, 1, 1, 1, /* v16 (38) - v23 (45) */ \
1, 1, 1, 1, \
- 1, 1, 1, 1, /* v24 (46) - v31 (53) */ \
+ 1, 1, 1, 1, /* v24 (46) - v31 (53) */ \
1, 1, 1, 1 }
/* Preferred register allocation order. */
@@ -470,7 +470,7 @@ extern const char *s390_host_detect_local_cpu (int argc, const char **argv);
{ 1, 2, 3, 4, 5, 0, 12, 11, 10, 9, 8, 7, 6, 14, 13, \
16, 17, 18, 19, 20, 21, 22, 23, \
24, 25, 26, 27, 28, 29, 30, 31, \
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, \
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, \
15, 32, 33, 34, 35, 36, 37 }
@@ -479,7 +479,7 @@ extern const char *s390_host_detect_local_cpu (int argc, const char **argv);
/* Maximum number of registers to represent a value of mode MODE
in a register of class CLASS. */
-#define CLASS_MAX_NREGS(CLASS, MODE) \
+#define CLASS_MAX_NREGS(CLASS, MODE) \
s390_class_max_nregs ((CLASS), (MODE))
/* We can reverse a CC mode safely if we know whether it comes from a
@@ -499,7 +499,7 @@ extern const char *s390_host_detect_local_cpu (int argc, const char **argv);
/* We use the following register classes:
GENERAL_REGS All general purpose registers
ADDR_REGS All general purpose registers except %r0
- (These registers can be used in address generation)
+ (These registers can be used in address generation)
FP_REGS All floating point registers
CC_REGS The condition code register
ACCESS_REGS The access registers
@@ -573,8 +573,8 @@ extern const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER];
/* Check whether REGNO is a hard register of the suitable class
or a pseudo register currently allocated to one such. */
#define REGNO_OK_FOR_INDEX_P(REGNO) \
- (((REGNO) < FIRST_PSEUDO_REGISTER \
- && REGNO_REG_CLASS ((REGNO)) == ADDR_REGS) \
+ (((REGNO) < FIRST_PSEUDO_REGISTER \
+ && REGNO_REG_CLASS ((REGNO)) == ADDR_REGS) \
|| ADDR_REGNO_P (reg_renumber[REGNO]))
#define REGNO_OK_FOR_BASE_P(REGNO) REGNO_OK_FOR_INDEX_P (REGNO)
@@ -745,7 +745,7 @@ CUMULATIVE_ARGS;
/* Profiling. */
-#define FUNCTION_PROFILER(FILE, LABELNO) \
+#define FUNCTION_PROFILER(FILE, LABELNO) \
s390_function_profiler ((FILE), ((LABELNO)))
#define PROFILE_BEFORE_PROLOGUE 1
diff --git a/gcc/config/s390/tpf.h b/gcc/config/s390/tpf.h
index 5a363f0..9e91244 100644
--- a/gcc/config/s390/tpf.h
+++ b/gcc/config/s390/tpf.h
@@ -1,8 +1,8 @@
/* Definitions for target OS TPF for GNU compiler, for IBM S/390 hardware
Copyright (C) 2003-2018 Free Software Foundation, Inc.
Contributed by P.J. Darcy (darcypj@us.ibm.com),
- Hartmut Penner (hpenner@de.ibm.com), and
- Ulrich Weigand (uweigand@de.ibm.com).
+ Hartmut Penner (hpenner@de.ibm.com), and
+ Ulrich Weigand (uweigand@de.ibm.com).
This file is part of GCC.
@@ -44,7 +44,7 @@ along with GCC; see the file COPYING3. If not see
/* TPF OS specific stack-pointer offset. */
#undef STACK_POINTER_OFFSET
-#define STACK_POINTER_OFFSET 448
+#define STACK_POINTER_OFFSET 448
/* When building for TPF, set a generic default target that is 64 bits. Also
enable TPF profiling support and the standard backchain by default. */
@@ -105,10 +105,10 @@ along with GCC; see the file COPYING3. If not see
#undef LIB_SPEC
#define LIB_SPEC "-lCTIS -lCISO -lCLBM -lCTAL -lCFVS -lCTBX -lCTXO \
- -lCJ00 -lCTDF -lCOMX -lCOMS -lCTHD -lCTAD -lTPFSTUB"
+ -lCJ00 -lCTDF -lCOMX -lCOMS -lCTHD -lCTAD -lTPFSTUB"
#define ENTRY_SPEC "%{mmain:-entry=_start} \
- %{!mmain:-entry=0}"
+ %{!mmain:-entry=0}"
/* All linking is done shared on TPF-OS. */
/* FIXME: When binutils patch for new emulation is committed