path: root/gcc
author    Jeffrey A Law <law@cygnus.com>            2000-04-11 20:02:46 +0000
committer Jeff Law <law@gcc.gnu.org>                2000-04-11 14:02:46 -0600
commit    520babc78350da8210931f4e60114402d99ec157 (patch)
tree      f4ae9c77c443c3961448209fd35398526106468e /gcc
parent    cba6a0b29f3a543c4249edd018cdda26ec2c7cf1 (diff)
long-double.h (FIX_TRUNCTFSI2_LIBCALL): Tweak for PA64.
From-SVN: r33082
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog                |   86
-rw-r--r--  gcc/config/pa/long_double.h  |    5
-rw-r--r--  gcc/config/pa/pa-protos.h    |   18
-rw-r--r--  gcc/config/pa/pa.c           |  636
-rw-r--r--  gcc/config/pa/pa.h           |  366
-rw-r--r--  gcc/config/pa/pa.md          | 1571
-rw-r--r--  gcc/config/pa/pa64-regs.h    |    2
-rw-r--r--  gcc/config/pa/pa64-start.h   |    2
8 files changed, 2479 insertions(+), 207 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f88ed08..2cacd99 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,89 @@
+Tue Apr 11 09:55:59 2000 Jeffrey A Law (law@cygnus.com)
+
+ * pa/long-double.h (FIX_TRUNCTFSI2_LIBCALL): Tweak for PA64.
+ * pa/pa-protos.h (output_call): Add additional argument indicating
+ if the call is a sibling/tail call.
+ (compute_zdepdi_operands, output_64bit_and): Prototype new functions.
+ (compute_64bit_ior, cmpib_comparison_operator): Likewise.
+	(function_arg, function_arg_partial_nregs): Likewise.
+ * pa/pa.c (override_options): Always set flag_pic for TARGET_64BIT.
+ (emit_move_sequence): Zero extend certain constants as needed
+ for TARGET_64BIT.
+ (compute_zdepdi_operands, output_64bit_and): New functions.
+ (output_64bit_ior, function_arg): Likewise.
+ (cmpib_comparison_operator, function_arg_partial_nregs): Likewise.
+ (compute_frame_size, hppa_expand_prologue): Handle TARGET_64BIT.
+ (hppa_expand_epilogue, return_addr_rtx, hppa_va_arg): Likewise.
+ (hppa_builtin_saveregs, output_cbranch, output_bb): Likewise.
+ (output_bvb): Likewise.
+ (output_millicode_call): Return pointer is in %r2 for TARGET_64BIT.
+ (output_call): New argument 'sibcall'. Generate sibcall sequences
+ as needed.
+	(print_operand): Handle cases 'Q', 'p', and 'z' for TARGET_64BIT.
+	(output_arg_descriptor): Do not emit argument descriptors for
+	TARGET_64BIT.
+ * pa/pa.h (TARGET_PA_11, TARGET_PA_20): Only define if not already
+ defined.
+ (TARGET_64BIT, PROMOTE_FUNCTION_RETURN): Define.
+	(FUNCTION_OK_FOR_SIBCALL): Define.
+ (CPP_PA10_SPEC, CPP_PA11_SPEC, CPP_PA20_SPEC): Define.
+ (CPP_CPU_DEFAULT_SPEC, SUBTARGET_EXTRA_SPECS, EXTRA_SPECS): Likewise.
+ (CPP_SPEC): Use new spec infrastructure.
+ (BITS_PER_WORD, UNITS_PER_WORD): Handle TARGET_64BIT.
+ (STACK_BOUNDARY, FUNCTION_BOUNDARY, PIC_OFFSET_TABLE_REGNUM): Likewise.
+ (RETURN_IN_MEMORY, EXTRA_CONSTRAINT, FIRST_PARM_OFFSET): Likewise.
+ (REG_PARM_STACK_SPACE, STACK_POINTER_OFFSET): Likewise.
+ (STACK_DYNAMIC_OFFSET, FUNCTION_VALUE): Likewise.
+ (FUNCTION_ARG_PASS_BY_REFERENCE, FUNCTION_ARG_CALLEE_COPIES): Likewise.
+ (TRAMPOLINE_TEMPLATE, TRAMPOLINE_SIZE): Likewise.
+ (INITIALIZE_TRAMPOLINE, LEGITIMATE_CONSTANT_P): Likewise.
+ (CONST_OK_FOR_LETTER_P, MOVE_RATIO): Likewise.
+	(FUNCTION_ARG): Call out to C code.
+ (FUNCTION_ARG_PARTIAL_NREGS): Likewise.
+ (MAX_BITS_PER_WORD, MAX_LONG_TYPE_SIZE, MAX_WCHAR_TYPE_SIZE): Define.
+ (MIN_UNITS_PER_WORD): Likewise.
+ * pa/pa.md (cmpdi): New expander.
+ (scc patterns, movstrsi): Not available for TARGET_64BIT.
+ (64bit conditional arithmetic): New patterns.
+ (absdi2, smindi3, umindi3, smaxdi3, umaxdi3): New patterns.
+	(movsicc): Not available if modes on all the operands do not match.
+ (movdicc): New expander and associated patterns.
+ (64bit branches): New patterns.
+ (pre_load, post_store): Generate appropriate code for TARGET_64BIT.
+ (pre_ldd, post_std): New patterns.
+ (64bit addil, load low part): New patterns.
+ (special movsf constant): Not available for TARGET_64BIT.
+ (movsf, movdf expanders): Force constants into memory.
+ (32bit movdf/movdi patterns): Disable for TARGET_64BIT.
+ (64bit movdf/movdi patterns): New patterns.
+ (zero_extendqidi2, zero_extendhidi2, zero_extendsidi2): New patterns
+ for TARGET_64BIT.
+ (extendqidi2, extendhidi2, extendsidi2): Similarly.
+ (adddi3 expander): Allow "arith_operand" for second input.
+ (32bit adddi3, subdi3, uaddcm): Disable for TARGET_64BIT.
+	(64bit adddi3, subdi3, uaddcm): New patterns for TARGET_64BIT.
+ (mulsi3 expander): Revamp slightly so it supports TARGET_64BIT too.
+ (muldi3): New expander for TARGET_64BIT.
+ (divsi3, udivsi3, modsi3, umodsi3): Fourth operand must be (reg:SI 2)
+ for TARGET_64BIT.
+ (32bit anddi3, iordi3, xordi3, andcm, negdi2, uaddcm): Disable
+ patterns for TARGET_64BIT.
+ (64bit anddi3, iordi3, xordi3, andcm, negdi2, uaddcm, shadd): New
+ patterns for TARGET_64BIT.
+ (64bit bit insertion/extractions): New patterns for TARGET_64BIT.
+ (64bit shifts/rotates): New patterns/expanders for TARGET_64BIT.
+ (sibcall_epilogue): New expander.
+ (casesi): Tweak for TARGET_64BIT.
+ (call expanders): Set & use the outgoing argument pointer. Use the
+ 64bit call patterns as needed. Add additional arg to output_call.
+	(call_internal_reg_64bit, call_value_internal_reg_64bit): New patterns.
+ (sibcall, sibcall_internal_symref): New expanders.
+	(sibcall_value, sibcall_value_internal_symref): Likewise.
+ (interspace_jump): Turn into an expander + matching patterns.
+ (canonicalize_funcptr_for_compare): Not needed for TARGET_64BIT.
+ * pa/pa64-regs.h: Eliminate trigraph sequences.
+ * pa/pa64-start.h (TARGET_PA_20): Fix typo.
+
2000-04-11 Zack Weinberg <zack@wolery.cumb.org>
* cppexp.c, cpphash.c, cpphash.h, cpplex.c, cpplib.c,
diff --git a/gcc/config/pa/long_double.h b/gcc/config/pa/long_double.h
index 4978855..ea2b1d3 100644
--- a/gcc/config/pa/long_double.h
+++ b/gcc/config/pa/long_double.h
@@ -48,7 +48,10 @@ do { long value[4]; \
#define TRUNCTFDF2_LIBCALL "_U_Qfcnvff_quad_to_dbl"
#define FLOATSITF2_LIBCALL "_U_Qfcnvxf_sgl_to_quad"
#define FLOATDITF2_LIBCALL "_U_Qfcnvxf_dbl_to_quad"
-#define FIX_TRUNCTFSI2_LIBCALL "_U_Qfcnvfxt_quad_to_sgl"
+/* We need to put a wrapper function around _U_Qfcnvfxt_quad_to_sgl so that
+ we can massage its return value for PA64. */
+#define FIX_TRUNCTFSI2_LIBCALL \
+ (TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl" : "_U_Qfcnvfxt_quad_to_sgl")
#define FIX_TRUNCTFDI2_LIBCALL "_U_Qfcnvfxt_quad_to_dbl"
#define EQTF2_LIBCALL "_U_Qfeq"
#define NETF2_LIBCALL "_U_Qfne"
diff --git a/gcc/config/pa/pa-protos.h b/gcc/config/pa/pa-protos.h
index ec90583..98ea1bf 100644
--- a/gcc/config/pa/pa-protos.h
+++ b/gcc/config/pa/pa-protos.h
@@ -50,7 +50,7 @@ extern const char *output_dbra PARAMS ((rtx *, rtx, int));
extern const char *output_movb PARAMS ((rtx *, rtx, int, int));
extern const char *output_parallel_movb PARAMS ((rtx *, int));
extern const char *output_parallel_addb PARAMS ((rtx *, int));
-extern const char *output_call PARAMS ((rtx, rtx));
+extern const char *output_call PARAMS ((rtx, rtx, int));
extern const char *output_millicode_call PARAMS ((rtx, rtx));
extern const char *output_mul_insn PARAMS ((int, rtx));
extern const char *output_div_insn PARAMS ((rtx *, int, rtx));
@@ -141,7 +141,23 @@ extern void hppa_expand_prologue PARAMS ((void));
extern void hppa_expand_epilogue PARAMS ((void));
extern int hppa_can_use_return_insn_p PARAMS ((void));
extern int ior_mask_p PARAMS ((unsigned HOST_WIDE_INT));
+extern void compute_zdepdi_operands PARAMS ((unsigned HOST_WIDE_INT,
+ unsigned *));
+#ifdef RTX_CODE
+extern char * output_64bit_and PARAMS ((rtx *));
+extern char * output_64bit_ior PARAMS ((rtx *));
+extern int cmpib_comparison_operator PARAMS ((rtx, enum machine_mode));
+#endif
+
+
#ifdef TREE_CODE
extern int reloc_needed PARAMS ((tree));
+#ifdef RTX_CODE
+extern rtx function_arg PARAMS ((CUMULATIVE_ARGS *, enum machine_mode,
+ tree, int, int));
+#endif
+extern int function_arg_partial_nregs PARAMS ((CUMULATIVE_ARGS *,
+ enum machine_mode,
+ tree, int));
#endif /* TREE_CODE */
diff --git a/gcc/config/pa/pa.c b/gcc/config/pa/pa.c
index f47ce37..d9b7f8e 100644
--- a/gcc/config/pa/pa.c
+++ b/gcc/config/pa/pa.c
@@ -189,6 +189,10 @@ override_options ()
write_symbols = NO_DEBUG;
}
+ /* We always generate PIC code when in 64bit mode. */
+ if (TARGET_64BIT)
+ flag_pic = 2;
+
/* Register global variables with the garbage collector. */
pa_add_gc_roots ();
}
@@ -1562,6 +1566,25 @@ emit_move_sequence (operands, mode, scratch_reg)
|| ! cint_ok_for_move (INTVAL (operand1)))
{
rtx temp;
+ int need_zero_extend = 0;
+
+ if (TARGET_64BIT && GET_CODE (operand1) == CONST_INT
+ && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
+ {
+ HOST_WIDE_INT val = INTVAL (operand1);
+ HOST_WIDE_INT nval = INTVAL (operand1);
+
+ /* If the value is the same after a 32->64bit sign
+ extension, then we can use it as-is. Else we will
+ need to sign extend the constant from 32->64bits
+ then zero extend the result from 32->64bits. */
+ nval = ((val & 0xffffffff) ^ (~0x7fffffff)) + 0x80000000;
+ if (val != nval)
+ {
+ need_zero_extend = 1;
+ operand1 = GEN_INT (nval);
+ }
+ }
if (reload_in_progress || reload_completed)
temp = operand0;
@@ -1571,6 +1594,17 @@ emit_move_sequence (operands, mode, scratch_reg)
emit_insn (gen_rtx_SET (VOIDmode, temp,
gen_rtx_HIGH (mode, operand1)));
operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
+ emit_move_insn (operands[0], operands[1]);
+
+ if (need_zero_extend)
+ {
+ emit_insn (gen_zero_extendsidi2 (operands[0],
+ gen_rtx_SUBREG (SImode,
+ operands[0],
+ 0)));
+ }
+
+ return 1;
}
}
/* Now have insn-emit do whatever it normally does. */
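The zero-extension path above relies on an xor-and-add identity to compute the 32->64-bit sign extension of a constant's low word. A minimal standalone sketch, assuming a 64-bit long as a stand-in for HOST_WIDE_INT (illustration only, not part of the patch):

    #include <stdio.h>

    /* Sign-extend the low 32 bits of VAL with the patch's identity:
       the AND keeps bits 0..31, the XOR with ~0x7fffffff flips bit 31
       and sets bits 32..63, and adding 0x80000000 cancels both when
       bit 31 was clear but leaves the upper bits set when it was set.  */
    static long
    sext32 (long val)
    {
      return ((val & 0xffffffffL) ^ ~0x7fffffffL) + 0x80000000L;
    }

    int
    main (void)
    {
      /* 0x7fffffff survives sign extension, so it is usable as-is.  */
      printf ("%lx\n", (unsigned long) sext32 (0x7fffffffL));  /* 7fffffff */
      /* 0x80000000 does not; emit_move_sequence loads the sign-extended
         form and then zero-extends the low word back.  */
      printf ("%lx\n", (unsigned long) sext32 (0x80000000L));  /* ffffffff80000000 */
      return 0;
    }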
@@ -1729,6 +1763,46 @@ compute_zdepwi_operands (imm, op)
op[2] = len;
}
+/* Compute position (in OP[1]) and width (in OP[2])
+ useful for copying IMM to a register using the depdi,z
+ instructions. Store the immediate value to insert in OP[0]. */
+void
+compute_zdepdi_operands (imm, op)
+ unsigned HOST_WIDE_INT imm;
+ unsigned *op;
+{
+ HOST_WIDE_INT lsb, len;
+
+ /* Find the least significant set bit in IMM. */
+ for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
+ {
+ if ((imm & 1) != 0)
+ break;
+ imm >>= 1;
+ }
+
+ /* Choose variants based on *sign* of the 5-bit field. */
+ if ((imm & 0x10) == 0)
+ len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
+ ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
+ else
+ {
+ /* Find the width of the bitstring in IMM. */
+ for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
+ {
+ if ((imm & ((unsigned HOST_WIDE_INT)1 << len)) == 0)
+ break;
+ }
+
+ /* Sign extend IMM as a 5-bit value. */
+ imm = (imm & 0xf) - 0x10;
+ }
+
+ op[0] = imm;
+ op[1] = 63 - lsb;
+ op[2] = len;
+}
+
/* Output assembler code to perform a doubleword move insn
with operands OPERANDS. */
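To make the new function concrete, here is a standalone sketch of the same computation with two hand-checked inputs. It assumes a 64-bit unsigned long in place of HOST_WIDE_INT and, like the original, presumes IMM is a valid depdi,z immediate (the insn predicates guarantee that):

    #include <stdio.h>

    static void
    zdepdi_operands (unsigned long imm, long op[3])
    {
      int lsb, len;
      long field;

      /* Find the least significant set bit.  */
      for (lsb = 0; lsb < 64; lsb++, imm >>= 1)
        if (imm & 1)
          break;

      field = imm;
      if ((imm & 0x10) == 0)             /* 5-bit field looks positive */
        len = lsb <= 60 ? 4 : 64 - lsb;
      else
        {
          for (len = 5; len < 64; len++) /* measure the run of 1 bits */
            if ((imm & (1UL << len)) == 0)
              break;
          field = (long) (imm & 0xf) - 0x10;   /* sign extend 5 bits */
        }

      op[0] = field;
      op[1] = 63 - lsb;
      op[2] = len;
    }

    int
    main (void)
    {
      long op[3];

      zdepdi_operands (0x3c0, op);   /* deposits 15 << 6 */
      printf ("depdi,z %ld,%ld,%ld\n", op[0], op[1], op[2]);  /* 15,57,4 */
      zdepdi_operands (0x7e0, op);   /* six 1 bits shifted left by 5 */
      printf ("depdi,z %ld,%ld,%ld\n", op[0], op[1], op[2]);  /* -1,58,6 */
      return 0;
    }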
@@ -2264,6 +2338,59 @@ output_and (operands)
return "and %1,%2,%0";
}
+/* Return a string to perform a bitwise-and of operands[1] with operands[2]
+ storing the result in operands[0]. */
+char *
+output_64bit_and (operands)
+ rtx *operands;
+{
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
+ {
+ unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
+ unsigned HOST_WIDE_INT ls0, ls1, ms0, p, len;
+
+ for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
+ if ((mask & ((unsigned HOST_WIDE_INT)1 << ls0)) == 0)
+ break;
+
+ for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
+ if ((mask & ((unsigned HOST_WIDE_INT)1 << ls1)) != 0)
+ break;
+
+ for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
+ if ((mask & ((unsigned HOST_WIDE_INT)1 << ms0)) == 0)
+ break;
+
+ if (ms0 != HOST_BITS_PER_WIDE_INT)
+ abort();
+
+ if (ls1 == HOST_BITS_PER_WIDE_INT)
+ {
+ len = ls0;
+
+ if (len == 0)
+ abort ();
+
+ operands[2] = GEN_INT (len);
+ return "extrd,u %1,63,%2,%0";
+ }
+ else
+ {
+ /* We could use this `depi' for the case above as well, but `depi'
+ requires one more register file access than an `extru'. */
+
+ p = 63 - ls0;
+ len = ls1 - ls0;
+
+ operands[2] = GEN_INT (p);
+ operands[3] = GEN_INT (len);
+ return "depdi 0,%2,%3,%0";
+ }
+ }
+ else
+ return "and %1,%2,%0";
+}
+
const char *
output_ior (operands)
rtx *operands;
@@ -2292,6 +2419,38 @@ output_ior (operands)
operands[3] = GEN_INT (len);
return "{depi|depwi} -1,%2,%3,%0";
}
+
+/* Return a string to perform a bitwise inclusive-or of operands[1] with
+   operands[2], storing the result in operands[0]. */
+char *
+output_64bit_ior (operands)
+ rtx *operands;
+{
+ unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
+ unsigned HOST_WIDE_INT bs0, bs1, p, len;
+
+ if (INTVAL (operands[2]) == 0)
+ return "copy %1,%0";
+
+ for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
+ if ((mask & ((unsigned HOST_WIDE_INT)1 << bs0)) != 0)
+ break;
+
+ for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
+ if ((mask & ((unsigned HOST_WIDE_INT)1 << bs1)) == 0)
+ break;
+
+ if (bs1 != HOST_BITS_PER_WIDE_INT
+ && ((unsigned HOST_WIDE_INT) 1 << bs1) <= mask)
+ abort();
+
+ p = 63 - bs0;
+ len = bs1 - bs0;
+
+ operands[2] = GEN_INT (p);
+ operands[3] = GEN_INT (len);
+ return "depdi -1,%2,%3,%0";
+}
/* Output an ascii string. */
void
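Both 64-bit mask routines above reduce to locating the boundaries of one contiguous bitstring in the constant. A simplified sketch of the `and' case, showing which instruction each kind of mask selects (the abort sanity checks are omitted and a 64-bit long is assumed; illustration only):

    #include <stdio.h>

    static void
    classify_and_mask (unsigned long mask)
    {
      int ls0, ls1;

      for (ls0 = 0; ls0 < 64; ls0++)       /* first 0 bit */
        if ((mask & (1UL << ls0)) == 0)
          break;
      for (ls1 = ls0; ls1 < 64; ls1++)     /* next 1 bit after it */
        if (mask & (1UL << ls1))
          break;

      if (ls1 == 64)
        /* Only low-order 1s: extract (zero-extend) the low LS0 bits.  */
        printf ("mask 0x%lx => extrd,u %%1,63,%d,%%0\n", mask, ls0);
      else
        /* A hole of 0s: clear it with a deposit of zeros.  */
        printf ("mask 0x%lx => depdi 0,%d,%d,%%0\n",
                mask, 63 - ls0, ls1 - ls0);
    }

    int
    main (void)
    {
      classify_and_mask (0xff);        /* extrd,u %1,63,8,%0 */
      classify_and_mask (~0xff0UL);    /* depdi 0,59,8,%0 */
      return 0;
    }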
@@ -2676,7 +2835,8 @@ compute_frame_size (size, fregs_live)
/* Account for space used by the callee floating point register saves. */
for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
- if (regs_ever_live[i] || regs_ever_live[i + 1])
+ if (regs_ever_live[i]
+ || (! TARGET_64BIT && regs_ever_live[i + 1]))
{
if (fregs_live)
*fregs_live = 1;
@@ -2693,8 +2853,9 @@ compute_frame_size (size, fregs_live)
/* Allocate space for the fixed frame marker. This space must be
allocated for any function that makes calls or otherwise allocates
stack space. */
- if (! leaf_function_p () || fsize)
+ if (!current_function_is_leaf || fsize)
fsize += 32;
+
return (fsize + STACK_BOUNDARY - 1) & ~(STACK_BOUNDARY - 1);
}
@@ -2796,8 +2957,12 @@ hppa_expand_prologue()
size_rtx = GEN_INT (actual_fsize);
/* Save RP first. The calling conventions manual states RP will
- always be stored into the caller's frame at sp-20. */
- if (regs_ever_live[2] || profile_flag)
+ always be stored into the caller's frame at sp-20 or sp - 16
+ depending on which ABI is in use. */
+ if ((regs_ever_live[2] || profile_flag) && TARGET_64BIT)
+ store_reg (2, -16, STACK_POINTER_REGNUM);
+
+ if ((regs_ever_live[2] || profile_flag) && ! TARGET_64BIT)
store_reg (2, -20, STACK_POINTER_REGNUM);
/* Allocate the local frame and set up the frame pointer if needed. */
@@ -2856,7 +3021,7 @@ hppa_expand_prologue()
for functions which make no calls and allocate no frame? Do
we need to allocate a frame, or can we just omit the save? For
now we'll just omit the save. */
- if (actual_fsize != 0 && flag_pic)
+ if (actual_fsize != 0 && flag_pic && !TARGET_64BIT)
store_reg (PIC_OFFSET_TABLE_REGNUM, -32, STACK_POINTER_REGNUM);
/* Profiling code.
@@ -2982,7 +3147,8 @@ hppa_expand_prologue()
/* Now actually save the FP registers. */
for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
{
- if (regs_ever_live[i] || regs_ever_live[i + 1])
+ if (regs_ever_live[i]
+ || (! TARGET_64BIT && regs_ever_live[i + 1]))
{
emit_move_insn (gen_rtx_MEM (DFmode,
gen_rtx_POST_INC (DFmode, tmpreg)),
@@ -3066,9 +3232,17 @@ hppa_expand_epilogue ()
RP gets used in the return (bv) instruction. This appears to still
be necessary even when we schedule the prologue and epilogue. */
if (frame_pointer_needed
+ && !TARGET_64BIT
&& (regs_ever_live [2] || profile_flag))
load_reg (2, -20, FRAME_POINTER_REGNUM);
-
+ else if (TARGET_64BIT && frame_pointer_needed
+ && (regs_ever_live[2] || profile_flag))
+ load_reg (2, -16, FRAME_POINTER_REGNUM);
+ else if (TARGET_64BIT
+ && ! frame_pointer_needed
+ && (regs_ever_live[2] || profile_flag)
+ && VAL_14_BITS_P (actual_fsize + 20))
+ load_reg (2, - (actual_fsize + 16), STACK_POINTER_REGNUM);
/* No frame pointer, and stack is smaller than 8k. */
else if (! frame_pointer_needed
&& VAL_14_BITS_P (actual_fsize + 20)
@@ -3120,7 +3294,8 @@ hppa_expand_epilogue ()
/* Actually do the restores now. */
for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
{
- if (regs_ever_live[i] || regs_ever_live[i + 1])
+ if (regs_ever_live[i]
+ || (! TARGET_64BIT && regs_ever_live[i + 1]))
{
emit_move_insn (gen_rtx_REG (DFmode, i),
gen_rtx_MEM (DFmode,
@@ -3140,6 +3315,7 @@ hppa_expand_epilogue ()
as possible.) */
if (! frame_pointer_needed
&& ! VAL_14_BITS_P (actual_fsize + 20)
+ && ! TARGET_64BIT
&& (regs_ever_live[2] || profile_flag))
{
set_reg_plus_d (STACK_POINTER_REGNUM,
@@ -3154,6 +3330,23 @@ hppa_expand_epilogue ()
doesn't set %r1, just %r30. */
load_reg (2, - 20, STACK_POINTER_REGNUM);
}
+ else if (! frame_pointer_needed
+ && ! VAL_14_BITS_P (actual_fsize + 20)
+ && TARGET_64BIT
+ && (regs_ever_live[2] || profile_flag))
+ {
+ set_reg_plus_d (STACK_POINTER_REGNUM,
+ STACK_POINTER_REGNUM,
+ - actual_fsize);
+
+ /* This used to try and be clever by not depending on the value in
+ %r30 and instead use the value held in %r1 (so that the 2nd insn
+ which sets %r30 could be put in the delay slot of the return insn).
+
+ That won't work since if the stack is exactly 8k set_reg_plus_d
+ doesn't set %r1, just %r30. */
+ load_reg (2, - 16, STACK_POINTER_REGNUM);
+ }
/* Reset stack pointer (and possibly frame pointer). The stack
pointer is initially set to fp + 64 to avoid a race condition. */
@@ -3216,7 +3409,10 @@ return_addr_rtx (count, frameaddr)
/* First, we start off with the normal return address pointer from
-20[frameaddr]. */
- emit_move_insn (saved_rp, plus_constant (frameaddr, -5 * UNITS_PER_WORD));
+ if (TARGET_64BIT)
+ return gen_rtx_MEM (Pmode, plus_constant (frameaddr, -16));
+ else
+ emit_move_insn (saved_rp, plus_constant (frameaddr, -5 * UNITS_PER_WORD));
/* Get pointer to the instruction stream. We have to mask out the
privilege level from the two low order bits of the return address
@@ -3824,6 +4020,13 @@ print_operand (file, x, code)
return;
}
abort();
+ case 'Q':
+ if (GET_CODE (x) == CONST_INT)
+ {
+ fprintf (file, "%d", 64 - (INTVAL (x) & 63));
+ return;
+ }
+ abort();
case 'L':
if (GET_CODE (x) == CONST_INT)
{
@@ -3838,6 +4041,13 @@ print_operand (file, x, code)
return;
}
abort();
+ case 'p':
+ if (GET_CODE (x) == CONST_INT)
+ {
+ fprintf (file, "%d", 63 - (INTVAL (x) & 63));
+ return;
+ }
+ abort();
case 'P':
if (GET_CODE (x) == CONST_INT)
{
@@ -3900,13 +4110,27 @@ print_operand (file, x, code)
fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
return;
}
+ case 'z':
+ {
+ unsigned op[3];
+ compute_zdepdi_operands (INTVAL (x), op);
+ fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
+ return;
+ }
default:
abort ();
}
if (GET_CODE (x) == REG)
{
fputs (reg_names [REGNO (x)], file);
- if (FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4 && (REGNO (x) & 1) == 0)
+ if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
+ {
+ fputs ("R", file);
+ return;
+ }
+ if (FP_REG_P (x)
+ && GET_MODE_SIZE (GET_MODE (x)) <= 4
+ && (REGNO (x) & 1) == 0)
fputs ("L", file);
}
else if (GET_CODE (x) == MEM)
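A hedged note on the three new operand codes, since the patch itself does not spell out their use: 'z' formats compute_zdepdi_operands output just as 'Z' does for the 32-bit case, while 'Q' and 'p' look like count conversions for the 64-bit shift and rotate patterns:

    /* Assumed intent, with sample values (n is the CONST_INT operand,
       masked to 6 bits):
         %Q -> 64 - n   n = 8 prints 56; a rotate left by n expressed
                        as the equivalent rotate right by 64 - n
         %p -> 63 - n   n = 8 prints 55; the PA bit position matching
                        a shift count of n (bit 0 is the MSB)  */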
@@ -3917,12 +4141,12 @@ print_operand (file, x, code)
{
case PRE_DEC:
case POST_DEC:
- base = XEXP (XEXP (x, 0), 0);
+ base = XEXP (XEXP (x, 0), 0);
fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
break;
case PRE_INC:
case POST_INC:
- base = XEXP (XEXP (x, 0), 0);
+ base = XEXP (XEXP (x, 0), 0);
fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
break;
default:
@@ -4221,6 +4445,11 @@ output_arg_descriptor (call_insn)
int i, output_flag = 0;
int regno;
+ /* We neither need nor want argument location descriptors for the
+ 64bit runtime environment. */
+ if (TARGET_64BIT)
+ return;
+
for (i = 0; i < 4; i++)
arg_regs[i] = 0;
@@ -4420,6 +4649,36 @@ hppa_builtin_saveregs ()
else
offset = current_function_arg_offset_rtx;
+ if (TARGET_64BIT)
+ {
+ int i, off;
+
+ /* Adjust for varargs/stdarg differences. */
+ if (argadj)
+ offset = plus_constant (current_function_arg_offset_rtx, -argadj);
+ else
+ offset = current_function_arg_offset_rtx;
+
+ /* We need to save %r26 .. %r19 inclusive starting at offset -64
+ from the incoming arg pointer and growing to larger addresses. */
+ for (i = 26, off = -64; i >= 19; i--, off += 8)
+ emit_move_insn (gen_rtx_MEM (word_mode,
+ plus_constant (arg_pointer_rtx, off)),
+ gen_rtx_REG (word_mode, i));
+
+ /* The incoming args pointer points just beyond the flushback area;
+     normally this is not a serious concern.  However, when we are doing
+ varargs/stdargs we want to make the arg pointer point to the start
+ of the incoming argument area. */
+ emit_move_insn (virtual_incoming_args_rtx,
+ plus_constant (arg_pointer_rtx, -64));
+
+ /* Now return a pointer to the first anonymous argument. */
+ return copy_to_reg (expand_binop (Pmode, add_optab,
+ virtual_incoming_args_rtx,
+ offset, 0, 0, OPTAB_LIB_WIDEN));
+ }
+
/* Store general registers on the stack. */
dest = gen_rtx_MEM (BLKmode,
plus_constant (current_function_internal_arg_pointer,
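Spelled out, the PA64 save loop above walks registers and offsets in lockstep and produces this mapping (derived by hand from the loop bounds; each slot is 8 bytes, relative to the incoming arg pointer):

    /* i = 26, off = -64; i--, off += 8:
         %r26 -> -64   %r25 -> -56   %r24 -> -48   %r23 -> -40
         %r22 -> -32   %r21 -> -24   %r20 -> -16   %r19 ->  -8  */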
@@ -4467,6 +4726,25 @@ hppa_va_arg (valist, type)
HOST_WIDE_INT align, size, ofs;
tree t, ptr, pptr;
+ if (TARGET_64BIT)
+ {
+ /* Every argument in PA64 is passed by value (including large structs).
+ Arguments with size greater than 8 must be aligned 0 MOD 16. */
+
+ size = int_size_in_bytes (type);
+ if (size > UNITS_PER_WORD)
+ {
+ t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
+ build_int_2 (2 * UNITS_PER_WORD - 1, 0));
+ t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_2 (-2 * UNITS_PER_WORD, -1));
+ t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+ return std_expand_builtin_va_arg (valist, type);
+ }
+
/* Compute the rounded size of the type. */
align = PARM_BOUNDARY / BITS_PER_UNIT;
size = int_size_in_bytes (type);
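The PA64 branch above rounds the va_list pointer up to the next 16-byte boundary before fetching any argument wider than a doubleword. The tree expression is ordinary round-up arithmetic; as a plain sketch (UNITS_PER_WORD is 8 in 64-bit mode):

    #include <stdio.h>

    /* (valist + 2*UNITS_PER_WORD - 1) & -(2*UNITS_PER_WORD),
       i.e. round up to 0 mod 16.  */
    static unsigned long
    align16 (unsigned long valist)
    {
      return (valist + 15) & ~15UL;
    }

    int
    main (void)
    {
      printf ("%lx\n", align16 (0x7f010008UL));  /* 7f010010 */
      printf ("%lx\n", align16 (0x7f010010UL));  /* 7f010010: already aligned */
      return 0;
    }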
@@ -4570,6 +4848,8 @@ output_cbranch (operands, nullify, length, negated, insn)
strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
else
strcpy (buf, "{com%I2b,|cmp%I2b,}");
+ if (GET_MODE (operands[1]) == DImode)
+ strcat (buf, "*");
if (negated)
strcat (buf, "%B3");
else
@@ -4593,6 +4873,8 @@ output_cbranch (operands, nullify, length, negated, insn)
&& nullify)
{
strcpy (buf, "{com%I2b,|cmp%I2b,}");
+ if (GET_MODE (operands[1]) == DImode)
+ strcat (buf, "*");
if (negated)
strcat (buf, "%S3");
else
@@ -4609,6 +4891,8 @@ output_cbranch (operands, nullify, length, negated, insn)
- insn_addresses[INSN_UID (insn)] - 8))
{
strcpy (buf, "{com%I2b,|cmp%I2b,}");
+ if (GET_MODE (operands[1]) == DImode)
+ strcat (buf, "*");
if (negated)
strcat (buf, "%B3 %2,%r1,%0%#");
else
@@ -4617,6 +4901,8 @@ output_cbranch (operands, nullify, length, negated, insn)
else
{
strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
+ if (GET_MODE (operands[1]) == DImode)
+ strcat (buf, "*");
if (negated)
strcat (buf, "%S3");
else
@@ -4640,6 +4926,15 @@ output_cbranch (operands, nullify, length, negated, insn)
strcpy (buf, "{com%I2b,%S3,n %2,%r1,.+20|cmp%I2b,%S3,n %2,%r1,.+20}");
else
strcpy (buf, "{com%I2b,%B3,n %2,%r1,.+20|cmp%I2b,%B3,n %2,%r1,.+20}");
+ if (GET_MODE (operands[1]) == DImode)
+ {
+ if (negated)
+ strcpy (buf,
+ "{com%I2b,*%S3,n %2,%r1,.+20|cmp%I2b,*%S3,n %2,%r1,.+20}");
+ else
+ strcpy (buf,
+ "{com%I2b,*%B3,n %2,%r1,.+20|cmp%I2b,*%B3,n %2,%r1,.+20}");
+ }
output_asm_insn (buf, operands);
/* Output an insn to save %r1. */
@@ -4665,6 +4960,13 @@ output_cbranch (operands, nullify, length, negated, insn)
strcpy (buf, "{com%I2b,%S3,n %2,%r1,.+28|cmp%I2b,%S3,n %2,%r1,.+28}");
else
strcpy (buf, "{com%I2b,%B3,n %2,%r1,.+28|cmp%I2b,%B3,n %2,%r1,.+28}");
+ if (GET_MODE (operands[1]) == DImode)
+ {
+ if (negated)
+ strcpy (buf, "{com%I2b,*%S3,n %2,%r1,.+28|cmp%I2b,*%S3,n %2,%r1,.+28}");
+ else
+ strcpy (buf, "{com%I2b,*%B3,n %2,%r1,.+28|cmp%I2b,*%B3,n %2,%r1,.+28}");
+ }
output_asm_insn (buf, operands);
/* Output an insn to save %r1. */
@@ -4752,6 +5054,10 @@ output_bb (operands, nullify, length, negated, insn, which)
strcpy (buf, "{extrs,|extrw,s,}");
else
strcpy (buf, "bb,");
+ if (useskip && GET_MODE (operands[0]) == DImode)
+ strcpy (buf, "extrd,s,*");
+ else if (GET_MODE (operands[0]) == DImode)
+ strcpy (buf, "bb,*");
if ((which == 0 && negated)
|| (which == 1 && ! negated))
strcat (buf, ">=");
@@ -4780,6 +5086,8 @@ output_bb (operands, nullify, length, negated, insn, which)
&& nullify)
{
strcpy (buf, "bb,");
+ if (GET_MODE (operands[0]) == DImode)
+ strcat (buf, "*");
if ((which == 0 && negated)
|| (which == 1 && ! negated))
strcat (buf, "<");
@@ -4800,6 +5108,8 @@ output_bb (operands, nullify, length, negated, insn, which)
- insn_addresses[INSN_UID (insn)] - 8))
{
strcpy (buf, "bb,");
+ if (GET_MODE (operands[0]) == DImode)
+ strcat (buf, "*");
if ((which == 0 && negated)
|| (which == 1 && ! negated))
strcat (buf, ">=");
@@ -4813,6 +5123,8 @@ output_bb (operands, nullify, length, negated, insn, which)
else
{
strcpy (buf, "{extrs,|extrw,s,}");
+ if (GET_MODE (operands[0]) == DImode)
+ strcpy (buf, "extrd,s,*");
if ((which == 0 && negated)
|| (which == 1 && ! negated))
strcat (buf, "<");
@@ -4890,6 +5202,10 @@ output_bvb (operands, nullify, length, negated, insn, which)
strcpy (buf, "{vextrs,|extrw,s,}");
else
strcpy (buf, "{bvb,|bb,}");
+ if (useskip && GET_MODE (operands[0]) == DImode)
+ strcpy (buf, "extrd,s,*}");
+ else if (GET_MODE (operands[0]) == DImode)
+ strcpy (buf, "bb,*");
if ((which == 0 && negated)
|| (which == 1 && ! negated))
strcat (buf, ">=");
@@ -4918,6 +5234,8 @@ output_bvb (operands, nullify, length, negated, insn, which)
&& nullify)
{
strcpy (buf, "{bvb,|bb,}");
+ if (GET_MODE (operands[0]) == DImode)
+ strcat (buf, "*");
if ((which == 0 && negated)
|| (which == 1 && ! negated))
strcat (buf, "<");
@@ -4938,6 +5256,8 @@ output_bvb (operands, nullify, length, negated, insn, which)
- insn_addresses[INSN_UID (insn)] - 8))
{
strcpy (buf, "{bvb,|bb,}");
+ if (GET_MODE (operands[0]) == DImode)
+ strcat (buf, "*");
if ((which == 0 && negated)
|| (which == 1 && ! negated))
strcat (buf, ">=");
@@ -4951,6 +5271,8 @@ output_bvb (operands, nullify, length, negated, insn, which)
else
{
strcpy (buf, "{vextrs,|extrw,s,}");
+ if (GET_MODE (operands[0]) == DImode)
+ strcpy (buf, "extrd,s,*");
if ((which == 0 && negated)
|| (which == 1 && ! negated))
strcat (buf, "<");
@@ -5206,6 +5528,10 @@ output_millicode_call (insn, call_dest)
rtx xoperands[4];
rtx seq_insn;
+ xoperands[3] = gen_rtx_REG (SImode, 31);
+ if (TARGET_64BIT)
+ xoperands[3] = gen_rtx_REG (SImode, 2);
+
/* Handle common case -- empty delay slot or no jump in the delay slot,
and we're sure that the branch will reach the beginning of the $CODE$
subspace. */
@@ -5216,7 +5542,7 @@ output_millicode_call (insn, call_dest)
&& get_attr_length (insn) == 4))
{
xoperands[0] = call_dest;
- output_asm_insn ("{bl|b,l} %0,%%r31%#", xoperands);
+ output_asm_insn ("{bl|b,l} %0,%3%#", xoperands);
return "";
}
@@ -5224,7 +5550,6 @@ output_millicode_call (insn, call_dest)
if (get_attr_length (insn) > 4)
{
int delay_insn_deleted = 0;
- rtx xoperands[2];
/* We need to emit an inline long-call branch. */
if (dbr_sequence_length () != 0
@@ -5247,8 +5572,8 @@ output_millicode_call (insn, call_dest)
|| ! (flag_pic || TARGET_PORTABLE_RUNTIME))
{
xoperands[0] = call_dest;
- output_asm_insn ("ldil L%%%0,%%r31", xoperands);
- output_asm_insn ("{ble|be,l} R%%%0(%%sr4,%%r31)", xoperands);
+ output_asm_insn ("ldil L%%%0,%3", xoperands);
+ output_asm_insn ("{ble|be,l} R%%%0(%%sr4,%3)", xoperands);
output_asm_insn ("nop", xoperands);
}
/* Pure portable runtime doesn't allow be/ble; we also don't have
@@ -5261,7 +5586,7 @@ output_millicode_call (insn, call_dest)
output_asm_insn ("ldo R%%%0(%%r29),%%r29", xoperands);
/* Get our return address into %r31. */
- output_asm_insn ("blr %%r0,%%r31", xoperands);
+ output_asm_insn ("blr %%r0,%3", xoperands);
/* Jump to our target address in %r29. */
output_asm_insn ("bv,n %%r0(%%r29)", xoperands);
@@ -5286,7 +5611,7 @@ output_millicode_call (insn, call_dest)
output_asm_insn ("ldo R%%%0-%1(%%r1),%%r1", xoperands);
/* Get the return address into %r31. */
- output_asm_insn ("blr 0,%%r31", xoperands);
+ output_asm_insn ("blr 0,%3", xoperands);
/* Branch to our target which is in %r1. */
output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
@@ -5330,11 +5655,11 @@ output_millicode_call (insn, call_dest)
xoperands[0] = call_dest;
xoperands[1] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
if (! VAL_14_BITS_P (distance))
- output_asm_insn ("{bl|b,l} %0,%%r31\n\tnop\n\tb,n %1", xoperands);
+ output_asm_insn ("{bl|b,l} %0,%3\n\tnop\n\tb,n %1", xoperands);
else
{
xoperands[2] = gen_label_rtx ();
- output_asm_insn ("\n\t{bl|b,l} %0,%%r31\n\tldo %1-%2(%%r31),%%r31",
+ output_asm_insn ("\n\t{bl|b,l} %0,%3\n\tldo %1-%2(%3),%3",
xoperands);
ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
CODE_LABEL_NUMBER (xoperands[2]));
@@ -5358,9 +5683,10 @@ extern struct obstack *current_obstack;
CALL_DEST is the routine we are calling. */
const char *
-output_call (insn, call_dest)
+output_call (insn, call_dest, sibcall)
rtx insn;
rtx call_dest;
+ int sibcall;
{
int distance;
rtx xoperands[4];
@@ -5376,7 +5702,8 @@ output_call (insn, call_dest)
&& get_attr_length (insn) == 4))
{
xoperands[0] = call_dest;
- output_asm_insn ("{bl|b,l} %0,%%r2%#", xoperands);
+ xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
+ output_asm_insn ("{bl|b,l} %0,%1%#", xoperands);
return "";
}
@@ -5527,8 +5854,17 @@ output_call (insn, call_dest)
/* Branch to our target which is in %r1. */
output_asm_insn ("bv %%r0(%%r1)", xoperands);
- /* Copy the return address into %r2 also. */
- output_asm_insn ("copy %%r31,%%r2", xoperands);
+ if (sibcall)
+ {
+ /* This call never returns, so we do not need to fix the
+ return pointer. */
+ output_asm_insn ("nop", xoperands);
+ }
+ else
+ {
+ /* Copy the return address into %r2 also. */
+ output_asm_insn ("copy %%r31,%%r2", xoperands);
+ }
}
else
{
@@ -5544,8 +5880,17 @@ output_call (insn, call_dest)
output_asm_insn ("{ble|be,l} R%%$$dyncall(%%sr4,%%r2)",
xoperands);
- /* Copy the return pointer into both %r31 and %r2. */
- output_asm_insn ("copy %%r31,%%r2", xoperands);
+ if (sibcall)
+ {
+ /* This call never returns, so we do not need to fix the
+ return pointer. */
+ output_asm_insn ("nop", xoperands);
+ }
+ else
+ {
+ /* Copy the return address into %r2 also. */
+ output_asm_insn ("copy %%r31,%%r2", xoperands);
+ }
}
}
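For reference, the kind of call the new sibcall sequences target, as a hypothetical source-level example (not from the patch): a call in tail position needs no return link of its own, so the %r2 fixup can be dropped and the callee returns straight to our caller.

    extern long bar (long);

    long
    foo (long x)
    {
      return bar (x + 1);   /* sibling/tail call candidate */
    }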
@@ -6593,6 +6938,245 @@ insn_refs_are_delayed (insn)
&& get_attr_type (insn) == TYPE_MILLI));
}
+/* Return the location of a parameter that is passed in a register or NULL
+ if the parameter has any component that is passed in memory.
+
+   This is new code and will be pushed into the net sources after
+ further testing.
+
+ ??? We might want to restructure this so that it looks more like other
+ ports. */
+rtx
+function_arg (cum, mode, type, named, incoming)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+ int incoming;
+{
+ int max_arg_words = (TARGET_64BIT ? 8 : 4);
+ int fpr_reg_base;
+ int gpr_reg_base;
+ rtx retval;
+
+ if (! TARGET_64BIT)
+ {
+ /* If this arg would be passed partially or totally on the stack, then
+ this routine should return zero. FUNCTION_ARG_PARTIAL_NREGS will
+ handle arguments which are split between regs and stack slots if
+ the ABI mandates split arguments. */
+ if (cum->words + FUNCTION_ARG_SIZE (mode, type) > max_arg_words
+ || mode == VOIDmode)
+ return NULL_RTX;
+ }
+ else
+ {
+ int offset = 0;
+ if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
+ offset = 1;
+ if (cum->words + offset >= max_arg_words
+ || mode == VOIDmode)
+ return NULL_RTX;
+ }
+
+ /* The 32bit ABIs and the 64bit ABIs are rather different,
+ particularly in their handling of FP registers. We might
+ be able to cleverly share code between them, but I'm not
+     going to bother in the hope that splitting them up results
+ in code that is more easily understood.
+
+ The 64bit code probably is very wrong for structure passing. */
+ if (TARGET_64BIT)
+ {
+ /* Advance the base registers to their current locations.
+
+ Remember, gprs grow towards smaller register numbers while
+ fprs grow to higher register numbers. Also remember FP regs
+ are always 4 bytes wide, while the size of an integer register
+ varies based on the size of the target word. */
+ gpr_reg_base = 26 - cum->words;
+ fpr_reg_base = 32 + cum->words;
+
+ /* If the argument is more than a word long, then we need to align
+ the base registers. Same caveats as above. */
+ if (FUNCTION_ARG_SIZE (mode, type) > 1)
+ {
+ if (mode != BLKmode)
+ {
+ /* First deal with alignment of the doubleword. */
+ gpr_reg_base -= (cum->words & 1);
+
+ /* This seems backwards, but it is what HP specifies. We need
+ gpr_reg_base to point to the smaller numbered register of
+ the integer register pair. So if we have an even register
+ number, then decrement the gpr base. */
+ gpr_reg_base -= ((gpr_reg_base % 2) == 0);
+
+ /* FP values behave sanely, except that each FP reg is only
+	     half of a word.  */
+ fpr_reg_base += ((fpr_reg_base % 2) == 0);
+ }
+ else
+ {
+ rtx loc[8];
+ int i, offset = 0, ub;
+ ub = FUNCTION_ARG_SIZE (mode, type);
+ ub = MIN(ub,
+ MAX(0, max_arg_words - cum->words - (cum->words & 1)));
+ gpr_reg_base -= (cum->words & 1);
+ for (i = 0; i < ub; i++)
+ {
+ loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (DImode,
+ gpr_reg_base),
+ GEN_INT(offset));
+ gpr_reg_base -= 1;
+ offset += 8;
+ }
+ if (ub == 0)
+ return NULL_RTX;
+ else if (ub == 1)
+ return XEXP (loc[0], 0);
+ else
+ return gen_rtx_PARALLEL(mode, gen_rtvec_v(ub, loc));
+ }
+ }
+ }
+ else
+ {
+ /* If the argument is larger than a word, then we know precisely
+ which registers we must use. */
+ if (FUNCTION_ARG_SIZE (mode, type) > 1)
+ {
+ if (cum->words)
+ {
+ gpr_reg_base = 23;
+ fpr_reg_base = 38;
+ }
+ else
+ {
+ gpr_reg_base = 25;
+ fpr_reg_base = 34;
+ }
+ }
+ else
+ {
+ /* We have a single word (32 bits). A simple computation
+ will get us the register #s we need. */
+ gpr_reg_base = 26 - cum->words;
+ fpr_reg_base = 32 + 2 * cum->words;
+ }
+ }
+
+ if (TARGET_64BIT && mode == TFmode)
+ {
+ return
+ gen_rtx_PARALLEL
+ (mode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (DImode, gpr_reg_base + 1),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (DImode, gpr_reg_base),
+ GEN_INT (8))));
+ }
+ /* Determine if the register needs to be passed in both general and
+ floating point registers. */
+ if ((TARGET_PORTABLE_RUNTIME || TARGET_64BIT)
+ /* If we are doing soft-float with portable runtime, then there
+ is no need to worry about FP regs. */
+ && ! TARGET_SOFT_FLOAT
+ /* The parameter must be some kind of float, else we can just
+ pass it in integer registers. */
+ && FLOAT_MODE_P (mode)
+ /* The target function must not have a prototype. */
+ && cum->nargs_prototype <= 0
+ /* libcalls do not need to pass items in both FP and general
+ registers. */
+ && type != NULL_TREE
+ /* All this hair applies to outgoing args only. */
+ && !incoming)
+ {
+ retval
+ = gen_rtx_PARALLEL
+ (mode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, fpr_reg_base),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, gpr_reg_base),
+ const0_rtx)));
+ }
+ else
+ {
+ /* See if we should pass this parameter in a general register. */
+ if (TARGET_SOFT_FLOAT
+ /* Indirect calls in the normal 32bit ABI require all arguments
+ to be passed in general registers. */
+ || (!TARGET_PORTABLE_RUNTIME
+ && !TARGET_64BIT
+ && cum->indirect)
+ /* If the parameter is not a floating point parameter, then
+ it belongs in GPRs. */
+ || !FLOAT_MODE_P (mode))
+ retval = gen_rtx_REG (mode, gpr_reg_base);
+ else
+ retval = gen_rtx_REG (mode, fpr_reg_base);
+ }
+ return retval;
+}
+
+
+/* If this arg would be passed totally in registers or totally on the stack,
+ then this routine should return zero. It is currently called only for
+ the 64-bit target. */
+int
+function_arg_partial_nregs (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ int max_arg_words = 8;
+ int offset = 0;
+
+ if (FUNCTION_ARG_SIZE(mode, type) > 1 && (cum->words & 1))
+ offset = 1;
+
+ if (cum->words + offset + FUNCTION_ARG_SIZE(mode, type) <= max_arg_words)
+ /* Arg fits fully into registers. */
+ return 0;
+ else if (cum->words + offset >= max_arg_words)
+ /* Arg fully on the stack. */
+ return 0;
+ else
+ /* Arg is split. */
+ return max_arg_words - cum->words - offset;
+
+}
+
+
+/* Return 1 if this is a comparison operator. This allows the use of
+ MATCH_OPERATOR to recognize all the branch insns. */
+
+int
+cmpib_comparison_operator (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return ((mode == VOIDmode || GET_MODE (op) == mode)
+ && (GET_CODE (op) == EQ
+ || GET_CODE (op) == NE
+ || GET_CODE (op) == GT
+ || GET_CODE (op) == GE
+ || GET_CODE (op) == GTU
+ || GET_CODE (op) == LT
+ || GET_CODE (op) == LE
+ || GET_CODE (op) == LEU));
+}
+
/* Mark ARG (which is really a struct deferred_plabel **) for GC. */
static void
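A worked example of the PA64 convention these two functions implement, hand-derived from the code above (so treat the specific register names as assumptions rather than ABI citations): argument word N lands in GPR 26-N, arguments wider than a word are doubleword aligned, and an argument may straddle the register/stack boundary.

    #include <stdio.h>

    /* Standalone sketch of function_arg_partial_nregs: sizes are in
       8-byte words, and there are 8 argument registers (%r26..%r19).  */
    static int
    partial_nregs (int words, int size)
    {
      int offset = (size > 1 && (words & 1)) ? 1 : 0;

      if (words + offset + size <= 8)
        return 0;                 /* fits fully in registers */
      if (words + offset >= 8)
        return 0;                 /* lands fully on the stack */
      return 8 - words - offset;  /* registers used by the split part */
    }

    int
    main (void)
    {
      /* A 24-byte struct arriving after six argument words: two words
         go in %r20/%r19 and the last word spills to the stack.  */
      printf ("%d\n", partial_nregs (6, 3));   /* prints 2 */
      /* After seven words it must skip a register for alignment and
         ends up entirely on the stack.  */
      printf ("%d\n", partial_nregs (7, 3));   /* prints 0 */
      return 0;
    }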
diff --git a/gcc/config/pa/pa.h b/gcc/config/pa/pa.h
index 06571c9..bd6cd31 100644
--- a/gcc/config/pa/pa.h
+++ b/gcc/config/pa/pa.h
@@ -85,7 +85,10 @@ extern int target_flags;
/* compile code for HP-PA 1.1 ("Snake") */
#define MASK_PA_11 1
+
+#ifndef TARGET_PA_11
#define TARGET_PA_11 (target_flags & MASK_PA_11)
+#endif
/* Disable all FP registers (they all become fixed). This may be necessary
for compiling kernels which perform lazy context switching of FP regs.
@@ -150,7 +153,14 @@ extern int target_flags;
/* Generate code for the HPPA 2.0 architecture. TARGET_PA_11 should also be
true when this is true. */
#define MASK_PA_20 4096
+#ifndef TARGET_PA_20
#define TARGET_PA_20 (target_flags & MASK_PA_20)
+#endif
+
+/* Generate code for the HPPA 2.0 architecture in 64bit mode. */
+#ifndef TARGET_64BIT
+#define TARGET_64BIT 0
+#endif
/* Macro to define tables used to set the flags.
This is a list in braces of pairs in braces,
@@ -248,17 +258,63 @@ extern int target_flags;
((GET_CODE (X) == PLUS ? OFFSET : 0) \
+ (frame_pointer_needed ? 0 : compute_frame_size (get_frame_size (), 0)))
+#define CPP_PA10_SPEC ""
+#define CPP_PA11_SPEC "-D_PA_RISC1_1 -D__hp9000s700"
+#define CPP_PA20_SPEC "-D_PA_RISC2_0 -D__hp9000s800"
+#define CPP_64BIT_SPEC "-D__LP64__ -D__LONG_MAX__=9223372036854775807L"
+
#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_PA_11) == 0
-#define CPP_SPEC "%{msnake:-D__hp9000s700 -D_PA_RISC1_1}\
- %{mpa-risc-1-1:-D__hp9000s700 -D_PA_RISC1_1}\
- %{!ansi: -D_HPUX_SOURCE -D_HIUX_SOURCE -D__STDC_EXT__}\
- %{threads:-D_REENTRANT -D_DCE_THREADS}"
+#define CPP_CPU_DEFAULT_SPEC "%(cpp_pa10)"
+#endif
+
+#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_PA_11) != 0
+#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_PA_20) != 0
+#define CPP_CPU_DEFAULT_SPEC "%(cpp_pa11) %(cpp_pa20)"
#else
-#define CPP_SPEC "%{!mpa-risc-1-0:%{!mnosnake:%{!msoft-float:-D__hp9000s700 -D_PA_RISC1_1}}} \
- %{!ansi: -D_HPUX_SOURCE -D_HIUX_SOURCE -D__STDC_EXT__}\
- %{threads:-D_REENTRANT -D_DCE_THREADS}"
+#define CPP_CPU_DEFAULT_SPEC "%(cpp_pa11)"
+#endif
+#endif
+
+#if TARGET_64BIT
+#define CPP_64BIT_DEFAULT_SPEC "%(cpp_64bit)"
+#else
+#define CPP_64BIT_DEFAULT_SPEC ""
+#endif
+
+/* This macro defines names of additional specifications to put in the
+ specs that can be used in various specifications like CC1_SPEC. Its
+ definition is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ specification name, and a string constant that used by the GNU CC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
#endif
+#define EXTRA_SPECS \
+ { "cpp_pa10", CPP_PA10_SPEC}, \
+ { "cpp_pa11", CPP_PA11_SPEC}, \
+ { "cpp_pa20", CPP_PA20_SPEC}, \
+ { "cpp_64bit", CPP_64BIT_SPEC}, \
+ { "cpp_cpu_default", CPP_CPU_DEFAULT_SPEC }, \
+ { "cpp_64bit_default", CPP_64BIT_DEFAULT_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define CPP_SPEC "\
+%{mpa-risc-1-0:%(cpp_pa10)} \
+%{mpa-risc-1-1:%(cpp_pa11)} \
+%{msnake:%(cpp_pa11)} \
+%{mpa-risc-2-0:%(cpp_pa20)} \
+%{!mpa-risc-1-0:%{!mpa-risc-1-1:%{!mpa-risc-2-0:%{!msnake:%(cpp_cpu_default)}}}} \
+%{m64bit:%(cpp_64bit)} \
+%{!m64bit:%(cpp_64bit_default)} \
+%{!ansi: -D_HPUX_SOURCE -D_HIUX_SOURCE -D__STDC_EXT__}\
+%{threads:-D_REENTRANT -D_DCE_THREADS}"
+
/* Defines for a K&R CC */
#define CC1_SPEC "%{pg:} %{p:}"
@@ -323,10 +379,14 @@ extern int target_flags;
Note that this is not necessarily the width of data type `int';
if using 16-bit ints on a 68000, this would still be 32.
But on a machine with 16-bit registers, this would be 16. */
-#define BITS_PER_WORD 32
+#define BITS_PER_WORD (TARGET_64BIT ? 64 : 32)
+#define MAX_BITS_PER_WORD 64
+#define MAX_LONG_TYPE_SIZE 64
+#define MAX_WCHAR_TYPE_SIZE 32
/* Width of a word, in units (bytes). */
-#define UNITS_PER_WORD 4
+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
+#define MIN_UNITS_PER_WORD 4
/* Width in bits of a pointer.
See also the macro `Pmode' defined below. */
@@ -344,10 +404,10 @@ extern int target_flags;
GCC for the PA always rounds its stacks to a 512bit boundary,
but that happens late in the compilation process. */
-#define STACK_BOUNDARY 64
+#define STACK_BOUNDARY (TARGET_64BIT ? 128 : 64)
/* Allocation boundary (in *bits*) for the code of a function. */
-#define FUNCTION_BOUNDARY 32
+#define FUNCTION_BOUNDARY (TARGET_64BIT ? 64 : 32)
/* Alignment of field after `int : 0' in a structure. */
#define EMPTY_FIELD_BOUNDARY 32
@@ -420,17 +480,19 @@ extern int target_flags;
/* Register which holds offset table for position-independent
data references. */
-#define PIC_OFFSET_TABLE_REGNUM 19
+#define PIC_OFFSET_TABLE_REGNUM (TARGET_64BIT ? 27 : 19)
#define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED 1
/* Register into which we save the PIC_OFFSET_TABLE_REGNUM so that it
can be restored across function calls. */
#define PIC_OFFSET_TABLE_REGNUM_SAVED 4
-/* SOM ABI says that objects larger than 64 bits are returned in memory. */
#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* SOM ABI says that objects larger than 64 bits are returned in memory.
+ PA64 ABI says that objects larger than 128 bits are returned in memory. */
#define RETURN_IN_MEMORY(TYPE) \
- (int_size_in_bytes (TYPE) > 8)
+ (TARGET_64BIT ? int_size_in_bytes (TYPE) > 16 : int_size_in_bytes (TYPE) > 8)
/* Register in which address to store a structure value
is passed to a function. */
@@ -447,7 +509,9 @@ extern int target_flags;
`K' is used for values that can be moved with a zdepi insn.
`L' is used for the 5 bit constants.
`M' is used for 0.
- `N' is used for values with the least significant 11 bits equal to zero.
+   `N' is used for values with the least significant 11 bits equal to zero
+	and which do not change when sign extended from 32 to 64 bits.
`O' is used for numbers n such that n+1 is a power of 2.
*/
@@ -457,8 +521,10 @@ extern int target_flags;
: (C) == 'K' ? zdepi_cint_p (VALUE) \
: (C) == 'L' ? VAL_5_BITS_P (VALUE) \
: (C) == 'M' ? (VALUE) == 0 \
- : (C) == 'N' ? ((VALUE) & 0x7ff) == 0 \
- : (C) == 'O' ? (((VALUE) & ((VALUE) + 1)) == 0) \
+ : (C) == 'N' ? (((VALUE) & (unsigned long)0x7ff) == 0 \
+ && (VALUE) == ((((VALUE) & 0xffffffff) ^ (~0x7fffffff)) \
+ + 0x80000000)) \
+ : (C) == 'O' ? (((VALUE) & ((VALUE) + (long)1)) == 0) \
: (C) == 'P' ? and_mask_p (VALUE) \
: 0)
@@ -542,11 +608,11 @@ extern int target_flags;
argument, not its beginning. To get the real offset of the first
argument, the size of the argument must be added. */
-#define FIRST_PARM_OFFSET(FNDECL) -32
+#define FIRST_PARM_OFFSET(FNDECL) (TARGET_64BIT ? -64 : -32)
/* When a parameter is passed in a register, stack space is still
allocated for it. */
-#define REG_PARM_STACK_SPACE(DECL) 16
+#define REG_PARM_STACK_SPACE(DECL) (TARGET_64BIT ? 64 : 16)
/* Define this if the above stack space is to be considered part of the
space allocated by the caller. */
@@ -562,10 +628,13 @@ extern int target_flags;
the stack: 16 bytes for register saves, and 32 bytes for magic.
This is the difference between the logical top of stack and the
actual sp. */
-#define STACK_POINTER_OFFSET -32
+#define STACK_POINTER_OFFSET \
+ (TARGET_64BIT ? -(current_function_outgoing_args_size + 16): -32)
#define STACK_DYNAMIC_OFFSET(FNDECL) \
- ((STACK_POINTER_OFFSET) - current_function_outgoing_args_size)
+ (TARGET_64BIT \
+ ? (STACK_POINTER_OFFSET) \
+ : ((STACK_POINTER_OFFSET) - current_function_outgoing_args_size))
/* Value is 1 if returning from a function call automatically
pops the arguments described by the number-of-args field in the call.
@@ -583,11 +652,14 @@ extern int target_flags;
/* On the HP-PA the value is found in register(s) 28(-29), unless
the mode is SF or DF. Then the value is returned in fr4 (32, ) */
-#define FUNCTION_VALUE(VALTYPE, FUNC) \
- gen_rtx_REG (TYPE_MODE (VALTYPE), ((! TARGET_SOFT_FLOAT \
- && (TYPE_MODE (VALTYPE) == SFmode || \
- TYPE_MODE (VALTYPE) == DFmode)) ? \
- 32 : 28))
+/* This must perform the same promotions as PROMOTE_MODE, else
+ PROMOTE_FUNCTION_RETURN will not work correctly. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx_REG (((INTEGRAL_TYPE_P (VALTYPE) \
+ && TYPE_PRECISION (VALTYPE) < BITS_PER_WORD) \
+ || POINTER_TYPE_P (VALTYPE)) \
+ ? word_mode : TYPE_MODE (VALTYPE), \
+ TREE_CODE (VALTYPE) == REAL_TYPE && !TARGET_SOFT_FLOAT ? 32 : 28)
/* Define how to find the value returned by a library function
assuming the value has mode MODE. */
@@ -712,54 +784,20 @@ struct hppa_args {int words, nargs_prototype, indirect; };
/* Do not expect to understand this without reading it several times. I'm
tempted to try and simplify it, but I worry about breaking something. */
-#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
- (4 >= ((CUM).words + FUNCTION_ARG_SIZE ((MODE), (TYPE))) \
- ? (!TARGET_PORTABLE_RUNTIME || (TYPE) == 0 \
- || !FLOAT_MODE_P (MODE) || TARGET_SOFT_FLOAT \
- || (CUM).nargs_prototype > 0) \
- ? gen_rtx_REG ((MODE), \
- (FUNCTION_ARG_SIZE ((MODE), (TYPE)) > 1 \
- ? (((!(CUM).indirect \
- || TARGET_PORTABLE_RUNTIME) \
- && (MODE) == DFmode \
- && ! TARGET_SOFT_FLOAT) \
- ? ((CUM).words ? 38 : 34) \
- : ((CUM).words ? 23 : 25)) \
- : (((!(CUM).indirect \
- || TARGET_PORTABLE_RUNTIME) \
- && (MODE) == SFmode \
- && ! TARGET_SOFT_FLOAT) \
- ? (32 + 2 * (CUM).words) \
- : (27 - (CUM).words - FUNCTION_ARG_SIZE ((MODE),\
- (TYPE))))))\
- /* We are calling a non-prototyped function with floating point \
- arguments using the portable conventions. */ \
- : (gen_rtx_PARALLEL \
- ((MODE), \
- gen_rtvec \
- (2, \
- gen_rtx_EXPR_LIST \
- (VOIDmode, \
- gen_rtx_REG ((MODE), \
- (FUNCTION_ARG_SIZE ((MODE), (TYPE)) > 1 \
- ? ((CUM).words ? 38 : 34) : (32 + 2 * (CUM).words))), \
- const0_rtx), \
- gen_rtx_EXPR_LIST \
- (VOIDmode, \
- gen_rtx_REG ((MODE), \
- (FUNCTION_ARG_SIZE ((MODE), (TYPE)) > 1 \
- ? ((CUM).words ? 23 : 25) \
- : (27 - (CUM).words - \
- FUNCTION_ARG_SIZE ((MODE), (TYPE))))), \
- const0_rtx)))) \
- /* Pass this parameter in the stack. */ \
- : 0)
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ function_arg (&CUM, MODE, TYPE, NAMED, 0)
+
+#define FUNCTION_INCOMING_ARG(CUM, MODE, TYPE, NAMED) \
+ function_arg (&CUM, MODE, TYPE, NAMED, 1)
/* For an arg passed partly in registers and partly in memory,
this is the number of registers used.
For args passed entirely in registers or entirely in memory, zero. */
-#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) 0
+/* For PA32 there are never split arguments. PA64, on the other hand, can
+ pass arguments partially in registers and partially in memory. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ (TARGET_64BIT ? function_arg_partial_nregs (&CUM, MODE, TYPE, NAMED) : 0)
/* If defined, a C expression that gives the alignment boundary, in
bits, of an argument with the specified mode and type. If it is
@@ -777,13 +815,21 @@ struct hppa_args {int words, nargs_prototype, indirect; };
/* Arguments larger than eight bytes are passed by invisible reference */
+/* PA64 does not pass anything by invisible reference. */
#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \
- (((TYPE) && int_size_in_bytes (TYPE) > 8) \
- || ((MODE) && GET_MODE_SIZE (MODE) > 8))
+ (TARGET_64BIT \
+ ? 0 \
+ : (((TYPE) && int_size_in_bytes (TYPE) > 8) \
+ || ((MODE) && GET_MODE_SIZE (MODE) > 8)))
+/* PA64 does not pass anything by invisible reference.
+ This should be undef'ed for 64bit, but we'll see if this works. The
+ problem is that we can't test TARGET_64BIT from the preprocessor. */
#define FUNCTION_ARG_CALLEE_COPIES(CUM, MODE, TYPE, NAMED) \
- (((TYPE) && int_size_in_bytes (TYPE) > 8) \
- || ((MODE) && GET_MODE_SIZE (MODE) > 8))
+ (TARGET_64BIT \
+ ? 0 \
+ : (((TYPE) && int_size_in_bytes (TYPE) > 8) \
+ || ((MODE) && GET_MODE_SIZE (MODE) > 8)))
extern struct rtx_def *hppa_compare_op0, *hppa_compare_op1;
@@ -869,22 +915,40 @@ extern int may_call_alloca;
It is best to keep this as small as possible to avoid having to
flush multiple lines in the cache. */
-#define TRAMPOLINE_TEMPLATE(FILE) \
- { \
- fputs ("\tldw 36(%r22),%r21\n", FILE); \
- fputs ("\tbb,>=,n %r21,30,.+16\n", FILE); \
- if (ASSEMBLER_DIALECT == 0) \
- fputs ("\tdepi 0,31,2,%r21\n", FILE); \
- else \
- fputs ("\tdepwi 0,31,2,%r21\n", FILE); \
- fputs ("\tldw 4(%r21),%r19\n", FILE); \
- fputs ("\tldw 0(%r21),%r21\n", FILE); \
- fputs ("\tldsid (%r21),%r1\n", FILE); \
- fputs ("\tmtsp %r1,%sr0\n", FILE); \
- fputs ("\tbe 0(%sr0,%r21)\n", FILE); \
- fputs ("\tldw 40(%r22),%r29\n", FILE); \
- fputs ("\t.word 0\n", FILE); \
- fputs ("\t.word 0\n", FILE); \
+#define TRAMPOLINE_TEMPLATE(FILE) \
+ { \
+ if (! TARGET_64BIT) \
+ { \
+ fputs ("\tldw 36(%r22),%r21\n", FILE); \
+ fputs ("\tbb,>=,n %r21,30,.+16\n", FILE); \
+ if (ASSEMBLER_DIALECT == 0) \
+ fputs ("\tdepi 0,31,2,%r21\n", FILE); \
+ else \
+ fputs ("\tdepwi 0,31,2,%r21\n", FILE); \
+ fputs ("\tldw 4(%r21),%r19\n", FILE); \
+ fputs ("\tldw 0(%r21),%r21\n", FILE); \
+ fputs ("\tldsid (%r21),%r1\n", FILE); \
+ fputs ("\tmtsp %r1,%sr0\n", FILE); \
+ fputs ("\tbe 0(%sr0,%r21)\n", FILE); \
+ fputs ("\tldw 40(%r22),%r29\n", FILE); \
+ fputs ("\t.word 0\n", FILE); \
+ fputs ("\t.word 0\n", FILE); \
+ } \
+ else \
+ { \
+ fputs ("\t.dword 0\n", FILE); \
+ fputs ("\t.dword 0\n", FILE); \
+ fputs ("\t.dword 0\n", FILE); \
+ fputs ("\t.dword 0\n", FILE); \
+ fputs ("\tmfia %r31\n", FILE); \
+ fputs ("\tldd 24(%r31),%r1\n", FILE); \
+ fputs ("\tldd 24(%r1),%r27\n", FILE); \
+ fputs ("\tldd 16(%r1),%r1\n", FILE); \
+ fputs ("\tbve (%r1)\n", FILE); \
+ fputs ("\tldd 32(%r31),%r31\n", FILE); \
+ fputs ("\t.dword 0 ; fptr\n", FILE); \
+ fputs ("\t.dword 0 ; static link\n", FILE); \
+ } \
}
/* Length in units of the trampoline for entering a nested function.
@@ -896,7 +960,7 @@ extern int may_call_alloca;
If the code part of the trampoline ever grows to > 32 bytes, then it
will become necessary to hack on the cacheflush pattern in pa.md. */
-#define TRAMPOLINE_SIZE (11 * 4)
+#define TRAMPOLINE_SIZE (TARGET_64BIT ? 72 : 11 * 4)
/* Emit RTL insns to initialize the variable parts of a trampoline.
FNADDR is an RTX for the address of the function's pure code.
@@ -905,22 +969,49 @@ extern int may_call_alloca;
Move the function address to the trampoline template at offset 12.
Move the static chain value to trampoline template at offset 16. */
-#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
{ \
- rtx start_addr, end_addr; \
+ if (! TARGET_64BIT) \
+ { \
+ rtx start_addr, end_addr; \
+ \
+ start_addr = memory_address (Pmode, plus_constant ((TRAMP), 36)); \
+ emit_move_insn (gen_rtx_MEM (Pmode, start_addr), (FNADDR)); \
+ start_addr = memory_address (Pmode, plus_constant ((TRAMP), 40)); \
+ emit_move_insn (gen_rtx_MEM (Pmode, start_addr), (CXT)); \
+ /* fdc and fic only use registers for the address to flush, \
+ they do not accept integer displacements. */ \
+ start_addr = force_reg (Pmode, (TRAMP)); \
+ end_addr = force_reg (Pmode, plus_constant ((TRAMP), 32)); \
+ emit_insn (gen_dcacheflush (start_addr, end_addr)); \
+ end_addr = force_reg (Pmode, plus_constant (start_addr, 32)); \
+ emit_insn (gen_icacheflush (start_addr, end_addr, start_addr, \
+ gen_reg_rtx (Pmode), gen_reg_rtx (Pmode)));\
+ } \
+ else \
+ { \
+ rtx start_addr, end_addr; \
\
- start_addr = memory_address (Pmode, plus_constant ((TRAMP), 36)); \
- emit_move_insn (gen_rtx_MEM (Pmode, start_addr), (FNADDR)); \
- start_addr = memory_address (Pmode, plus_constant ((TRAMP), 40)); \
- emit_move_insn (gen_rtx_MEM (Pmode, start_addr), (CXT)); \
- /* fdc and fic only use registers for the address to flush, \
- they do not accept integer displacements. */ \
- start_addr = force_reg (Pmode, (TRAMP)); \
- end_addr = force_reg (Pmode, plus_constant ((TRAMP), 32)); \
- emit_insn (gen_dcacheflush (start_addr, end_addr)); \
- end_addr = force_reg (Pmode, plus_constant (start_addr, 32)); \
- emit_insn (gen_icacheflush (start_addr, end_addr, start_addr, \
- gen_reg_rtx (Pmode), gen_reg_rtx (Pmode)));\
+ start_addr = memory_address (Pmode, plus_constant ((TRAMP), 56)); \
+ emit_move_insn (gen_rtx_MEM (Pmode, start_addr), (FNADDR)); \
+ start_addr = memory_address (Pmode, plus_constant ((TRAMP), 64)); \
+ emit_move_insn (gen_rtx_MEM (Pmode, start_addr), (CXT)); \
+ /* Create a fat pointer for the trampoline. */ \
+ end_addr = force_reg (Pmode, plus_constant ((TRAMP), 32)); \
+ start_addr = memory_address (Pmode, plus_constant ((TRAMP), 16)); \
+ emit_move_insn (gen_rtx_MEM (Pmode, start_addr), end_addr); \
+ end_addr = gen_rtx_REG (Pmode, 27); \
+ start_addr = memory_address (Pmode, plus_constant ((TRAMP), 24)); \
+ emit_move_insn (gen_rtx_MEM (Pmode, start_addr), end_addr); \
+ /* fdc and fic only use registers for the address to flush, \
+ they do not accept integer displacements. */ \
+ start_addr = force_reg (Pmode, (TRAMP)); \
+ end_addr = force_reg (Pmode, plus_constant ((TRAMP), 32)); \
+ emit_insn (gen_dcacheflush (start_addr, end_addr)); \
+ end_addr = force_reg (Pmode, plus_constant (start_addr, 32)); \
+ emit_insn (gen_icacheflush (start_addr, end_addr, start_addr, \
+ gen_reg_rtx (Pmode), gen_reg_rtx (Pmode)));\
+ } \
}
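The offsets used in the 64-bit arm above imply the following trampoline layout. This is a sketch for orientation only; the field names are invented, assuming 8-byte dwords and 4-byte instructions:

    struct pa64_trampoline {
      unsigned long long pad[2];        /*  0: two unused .dword slots      */
      unsigned long long fptr_pc;       /* 16: set to TRAMP+32, the code    */
      unsigned long long saved_gp;      /* 24: %r27 captured at init time   */
      unsigned int insns[6];            /* 32: mfia ... bve ... ldd         */
      unsigned long long fnaddr;        /* 56: FNADDR, target's descriptor  */
      unsigned long long static_chain;  /* 64: CXT                          */
    };                                  /* sizeof == 72                     */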
/* Emit code for a call to builtin_saveregs. We must emit USE insns which
@@ -995,17 +1086,30 @@ extern int may_call_alloca;
/* Include all constant integers and constant doubles, but not
floating-point, except for floating-point zero.
- Reject LABEL_REFs if we're not using gas or the new HP assembler. */
+ Reject LABEL_REFs if we're not using gas or the new HP assembler.
+
+ ?!? For now also reject CONST_DOUBLES in 64bit mode. This will need
+ further work. */
#ifdef NEW_HP_ASSEMBLER
#define LEGITIMATE_CONSTANT_P(X) \
((GET_MODE_CLASS (GET_MODE (X)) != MODE_FLOAT \
|| (X) == CONST0_RTX (GET_MODE (X))) \
+ && !(TARGET_64BIT && GET_CODE (X) == CONST_DOUBLE) \
+ && !(TARGET_64BIT && GET_CODE (X) == CONST_INT \
+ && !(cint_ok_for_move (INTVAL (X)) \
+ || ((INTVAL (X) & 0xffffffff80000000L) == 0xffffffff80000000L) \
+ || ((INTVAL (X) & 0xffffffff00000000L) == 0x0000000000000000L))) \
&& !function_label_operand (X, VOIDmode))
#else
#define LEGITIMATE_CONSTANT_P(X) \
((GET_MODE_CLASS (GET_MODE (X)) != MODE_FLOAT \
|| (X) == CONST0_RTX (GET_MODE (X))) \
&& (GET_CODE (X) != LABEL_REF || TARGET_GAS)\
+ && !(TARGET_64BIT && GET_CODE (X) == CONST_DOUBLE) \
+ && !(TARGET_64BIT && GET_CODE (X) == CONST_INT \
+ && !(cint_ok_for_move (INTVAL (X)) \
+ || ((INTVAL (X) & 0xffffffff80000000L) == 0xffffffff80000000L) \
+ || ((INTVAL (X) & 0xffffffff00000000L) == 0x0000000000000000L))) \
&& !function_label_operand (X, VOIDmode))
#endif
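The extra CONST_INT clause in both variants accepts a 64-bit value when cint_ok_for_move allows it, or when its upper 33 bits are all ones or its upper 32 bits are all zeros. A stand-alone sketch of the bit test (assuming a 64-bit HOST_WIDE_INT):

    static int
    pa64_const_ok_p (unsigned long long v)
    {
      /* All ones in bits 63..31, or all zeros in bits 63..32.  */
      return ((v & 0xffffffff80000000ULL) == 0xffffffff80000000ULL
              || (v & 0xffffffff00000000ULL) == 0);
    }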
@@ -1064,8 +1168,11 @@ extern int may_call_alloca;
&& !(GET_CODE (XEXP (OP, 0)) == PLUS \
&& (GET_CODE (XEXP (XEXP (OP, 0), 0)) == MULT\
|| GET_CODE (XEXP (XEXP (OP, 0), 1)) == MULT)))\
+ : ((C) == 'U' ? \
+ (GET_CODE (OP) == CONST_INT && INTVAL (OP) == 63) \
: ((C) == 'S' ? \
- (GET_CODE (OP) == CONST_INT && INTVAL (OP) == 31) : 0))))
+ (GET_CODE (OP) == CONST_INT && INTVAL (OP) == 31) : 0)))))
+
/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
and check its validity for a certain class.
@@ -1115,16 +1222,16 @@ extern int may_call_alloca;
doing so avoids losing for loading/storing a FP register at an address
which will not fit in 5 bits. */
-#define VAL_5_BITS_P(X) ((unsigned)(X) + 0x10 < 0x20)
+#define VAL_5_BITS_P(X) ((unsigned HOST_WIDE_INT)(X) + 0x10 < 0x20)
#define INT_5_BITS(X) VAL_5_BITS_P (INTVAL (X))
-#define VAL_U5_BITS_P(X) ((unsigned)(X) < 0x20)
+#define VAL_U5_BITS_P(X) ((unsigned HOST_WIDE_INT)(X) < 0x20)
#define INT_U5_BITS(X) VAL_U5_BITS_P (INTVAL (X))
-#define VAL_11_BITS_P(X) ((unsigned)(X) + 0x400 < 0x800)
+#define VAL_11_BITS_P(X) ((unsigned HOST_WIDE_INT)(X) + 0x400 < 0x800)
#define INT_11_BITS(X) VAL_11_BITS_P (INTVAL (X))
-#define VAL_14_BITS_P(X) ((unsigned)(X) + 0x2000 < 0x4000)
+#define VAL_14_BITS_P(X) ((unsigned HOST_WIDE_INT)(X) + 0x2000 < 0x4000)
#define INT_14_BITS(X) VAL_14_BITS_P (INTVAL (X))
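These predicates all use the same bias-and-compare trick: adding half the range maps the signed interval onto an unsigned one, so a single unsigned comparison checks both bounds. Spelled out for the 5-bit case (illustrative):

    static int
    val_5_bits_p (long long x)
    {
      /* Maps [-16, 15] onto [0, 31]; anything outside wraps past 0x20.  */
      return (unsigned long long) x + 0x10 < 0x20;
    }
    /* val_5_bits_p (-16) and val_5_bits_p (15) hold;
       val_5_bits_p (-17) and val_5_bits_p (16) do not.  */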
#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
@@ -1414,8 +1521,13 @@ while (0)
/* Higher than the default as we prefer to use simple move insns
(better scheduling and delay slot filling) and because our
- built-in block move is really a 2X unrolled loop. */
-#define MOVE_RATIO 4
+ built-in block move is really a 2X unrolled loop.
+
+ Believe it or not, this has to be big enough to allow for copying all
+ arguments passed in registers to avoid infinite recursion during argument
+ setup for a function call. Why? Consider how we copy the stack slots
+ reserved for parameters when they may be trashed by a call. */
+#define MOVE_RATIO (TARGET_64BIT ? 8 : 4)
/* Define if operations between registers always perform the operation
on the full register even if a narrower mode is specified. */
@@ -1441,6 +1553,7 @@ while (0)
/* When a prototype says `char' or `short', really pass an `int'. */
#define PROMOTE_PROTOTYPES 1
+#define PROMOTE_FUNCTION_RETURN 1
/* Specify the machine mode that pointers have.
After generation of rtl, the compiler makes no further distinction
@@ -1867,6 +1980,35 @@ while (0)
/* The number of Pmode words for the setjmp buffer. */
#define JMP_BUF_SIZE 50
+/* Only direct calls to static functions are allowed to be sibling (tail)
+ call optimized.
+
+ This restriction is necessary because some linker generated stubs will
+ store return pointers into rp' in some cases which might clobber a
+ live value already in rp'.
+
+ In a sibcall the current function and the target function share stack
+ space. Thus if the path to the current function and the path to the
+ target function save a value in rp', they save the value into the
+ same stack slot, which has undesirable consequences.
+
+ Because of the deferred binding nature of shared libraries any function
+ with external scope could be in a different load module and thus require
+ rp' to be saved when calling that function. So sibcall optimizations
+ can only be safe for static functions.
+
+ Note that GCC never needs return value relocations, so we don't have to
+ worry about static calls with return value relocations (which require
+ saving rp').
+
+ It is safe to perform a sibcall optimization when the target function
+ will never return. */
+#define FUNCTION_OK_FOR_SIBCALL(DECL) \
+ (DECL \
+ && ! TARGET_64BIT \
+ && (! TREE_PUBLIC (DECL) \
+ || TREE_THIS_VOLATILE (DECL)))
+
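In source terms (an illustrative example, not taken from the patch): on a 32-bit target the predicate lets the call in foo below become a sibcall, while the call in qux must stay a full call because baz has external scope:

    static int bar (int x) { return x + 1; }
    extern int baz (int x);

    int foo (int x) { return bar (x); }  /* eligible: bar is static      */
    int qux (int x) { return baz (x); }  /* not eligible: baz is public  */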
#define PREDICATE_CODES \
{"reg_or_0_operand", {SUBREG, REG, CONST_INT}}, \
{"call_operand_address", {LABEL_REF, SYMBOL_REF, CONST_INT, \
diff --git a/gcc/config/pa/pa.md b/gcc/config/pa/pa.md
index 7dee889..4504587 100644
--- a/gcc/config/pa/pa.md
+++ b/gcc/config/pa/pa.md
@@ -409,6 +409,20 @@
;; emit RTL for both the compare and the branch.
;;
+(define_expand "cmpdi"
+ [(set (reg:CC 0)
+ (compare:CC (match_operand:DI 0 "reg_or_0_operand" "")
+ (match_operand:DI 1 "register_operand" "")))]
+ "TARGET_64BIT"
+
+ "
+{
+ hppa_compare_op0 = operands[0];
+ hppa_compare_op1 = operands[1];
+ hppa_branch_type = CMP_SI;
+ DONE;
+}")
+
(define_expand "cmpsi"
[(set (reg:CC 0)
(compare:CC (match_operand:SI 0 "reg_or_0_operand" "")
@@ -474,7 +488,7 @@
[(set (match_operand:SI 0 "register_operand" "")
(eq:SI (match_dup 1)
(match_dup 2)))]
- ""
+ "!TARGET_64BIT"
"
{
/* fp scc patterns rarely match, and are not a win on the PA. */
@@ -490,7 +504,7 @@
[(set (match_operand:SI 0 "register_operand" "")
(ne:SI (match_dup 1)
(match_dup 2)))]
- ""
+ "!TARGET_64BIT"
"
{
/* fp scc patterns rarely match, and are not a win on the PA. */
@@ -504,7 +518,7 @@
[(set (match_operand:SI 0 "register_operand" "")
(lt:SI (match_dup 1)
(match_dup 2)))]
- ""
+ "!TARGET_64BIT"
"
{
/* fp scc patterns rarely match, and are not a win on the PA. */
@@ -518,7 +532,7 @@
[(set (match_operand:SI 0 "register_operand" "")
(gt:SI (match_dup 1)
(match_dup 2)))]
- ""
+ "!TARGET_64BIT"
"
{
/* fp scc patterns rarely match, and are not a win on the PA. */
@@ -532,7 +546,7 @@
[(set (match_operand:SI 0 "register_operand" "")
(le:SI (match_dup 1)
(match_dup 2)))]
- ""
+ "!TARGET_64BIT"
"
{
/* fp scc patterns rarely match, and are not a win on the PA. */
@@ -546,7 +560,7 @@
[(set (match_operand:SI 0 "register_operand" "")
(ge:SI (match_dup 1)
(match_dup 2)))]
- ""
+ "!TARGET_64BIT"
"
{
/* fp scc patterns rarely match, and are not a win on the PA. */
@@ -560,7 +574,7 @@
[(set (match_operand:SI 0 "register_operand" "")
(ltu:SI (match_dup 1)
(match_dup 2)))]
- ""
+ "!TARGET_64BIT"
"
{
if (hppa_branch_type != CMP_SI)
@@ -573,7 +587,7 @@
[(set (match_operand:SI 0 "register_operand" "")
(gtu:SI (match_dup 1)
(match_dup 2)))]
- ""
+ "!TARGET_64BIT"
"
{
if (hppa_branch_type != CMP_SI)
@@ -586,7 +600,7 @@
[(set (match_operand:SI 0 "register_operand" "")
(leu:SI (match_dup 1)
(match_dup 2)))]
- ""
+ "!TARGET_64BIT"
"
{
if (hppa_branch_type != CMP_SI)
@@ -599,7 +613,7 @@
[(set (match_operand:SI 0 "register_operand" "")
(geu:SI (match_dup 1)
(match_dup 2)))]
- ""
+ "!TARGET_64BIT"
"
{
if (hppa_branch_type != CMP_SI)
@@ -621,6 +635,16 @@
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operator:DI 3 "comparison_operator"
+ [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith11_operand" "rI")]))]
+ "TARGET_64BIT"
+ "cmp%I2clr,*%B3 %2,%1,%0\;ldi 1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
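This two-instruction scc idiom relies on nullification: cmpclr with the inverted condition (%B3) always clears the target, and skips the ldi exactly when the original condition is false. A C model of the net effect, using '<' as a stand-in for operator 3 (illustrative):

    static long
    scc_model (long op1, long op2)
    {
      long dst = 0;    /* cmpclr always clears the target           */
      if (op1 < op2)   /* the ldi survives only when the test holds */
        dst = 1;
      return dst;
    }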
(define_insn "iorscc"
[(set (match_operand:SI 0 "register_operand" "=r")
(ior:SI (match_operator:SI 3 "comparison_operator"
@@ -634,6 +658,19 @@
[(set_attr "type" "binary")
(set_attr "length" "12")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (match_operator:DI 3 "comparison_operator"
+ [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith11_operand" "rI")])
+ (match_operator:DI 6 "comparison_operator"
+ [(match_operand:DI 4 "register_operand" "r")
+ (match_operand:DI 5 "arith11_operand" "rI")])))]
+ "TARGET_64BIT"
+ "cmp%I2clr,*%S3 %2,%1,%%r0\;cmp%I5clr,*%B6 %5,%4,%0\;ldi 1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "12")])
+
;; Combiner patterns for common operations performed with the output
;; from an scc insn (negscc and incscc).
(define_insn "negscc"
@@ -646,6 +683,16 @@
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operator:DI 3 "comparison_operator"
+ [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith11_operand" "rI")])))]
+ "TARGET_64BIT"
+ "cmp%I2clr,*%B3 %2,%1,%0\;ldi -1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
;; Patterns for adding/subtracting the result of a boolean expression from
;; a register. First we have special patterns that make use of the carry
;; bit, and output only two instructions. For the cases we can't in
@@ -662,6 +709,16 @@
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (leu:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "arith11_operand" "rI"))
+ (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "sub%I3 %3,%2,%%r0\;add,dc %%r0,%1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
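Here the subtraction runs only for its carry bit: on PA a sub sets carry when no borrow occurs, i.e. when op3 >= op2 unsigned, and add,dc then folds that carry into the sum. A model of the net effect (illustrative):

    static unsigned long long
    plus_leu_model (unsigned long long op1,
                    unsigned long long op2, unsigned long long op3)
    {
      unsigned carry = (op2 <= op3);  /* op3 - op2 does not borrow */
      return op1 + carry;
    }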
; This need only accept registers for op3, since canonicalization
; replaces geu with gtu when op3 is an integer.
(define_insn ""
@@ -674,6 +731,16 @@
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (geu:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "register_operand" "r"))
+ (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "sub %2,%3,%%r0\;add,dc %%r0,%1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
; Match only integers for op3 here. This is used as canonical form of the
; geu pattern when op3 is an integer. Don't match registers since we can't
; make better code than the general incscc pattern.
@@ -687,6 +754,16 @@
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (gtu:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "int11_operand" "I"))
+ (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "addi %k3,%2,%%r0\;add,dc %%r0,%1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
(define_insn "incscc"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_operator:SI 4 "comparison_operator"
@@ -701,6 +778,19 @@
(set_attr "length" "8,12")])
(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (plus:DI (match_operator:DI 4 "comparison_operator"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (match_operand:DI 3 "arith11_operand" "rI,rI")])
+ (match_operand:DI 1 "register_operand" "0,?r")))]
+ "TARGET_64BIT"
+ "@
+ cmp%I3clr,*%B4 %3,%2,%%r0\;addi 1,%0,%0
+ cmp%I3clr,*%B4 %3,%2,%%r0\;addi,tr 1,%1,%0\;copy %1,%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "length" "8,12")])
+
+(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (match_operand:SI 1 "register_operand" "r")
(gtu:SI (match_operand:SI 2 "register_operand" "r")
@@ -711,6 +801,16 @@
(set_attr "length" "8")])
(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (gtu:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "arith11_operand" "rI"))))]
+ "TARGET_64BIT"
+ "sub%I3 %3,%2,%%r0\;sub,db %1,%%r0,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
+(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
(gtu:SI (match_operand:SI 2 "register_operand" "r")
@@ -721,6 +821,17 @@
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (gtu:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "arith11_operand" "rI")))
+ (match_operand:DI 4 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "sub%I3 %3,%2,%%r0\;sub,db %1,%4,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
; This need only accept registers for op3, since canonicalization
; replaces ltu with leu when op3 is an integer.
(define_insn ""
@@ -734,6 +845,16 @@
(set_attr "length" "8")])
(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (ltu:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "register_operand" "r"))))]
+ "TARGET_64BIT"
+ "sub %2,%3,%%r0\;sub,db %1,%%r0,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
+(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
(ltu:SI (match_operand:SI 2 "register_operand" "r")
@@ -744,6 +865,17 @@
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (ltu:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "register_operand" "r")))
+ (match_operand:DI 4 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "sub %2,%3,%%r0\;sub,db %1,%4,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
; Match only integers for op3 here. This is used as canonical form of the
; ltu pattern when op3 is an integer. Don't match registers since we can't
; make better code than the general incscc pattern.
@@ -758,6 +890,16 @@
(set_attr "length" "8")])
(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (leu:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "int11_operand" "I"))))]
+ "TARGET_64BIT"
+ "addi %k3,%2,%%r0\;sub,db %1,%%r0,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
+(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
(leu:SI (match_operand:SI 2 "register_operand" "r")
@@ -768,6 +910,17 @@
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (leu:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "int11_operand" "I")))
+ (match_operand:DI 4 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "addi %k3,%2,%%r0\;sub,db %1,%4,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
(define_insn "decscc"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(minus:SI (match_operand:SI 1 "register_operand" "0,?r")
@@ -781,6 +934,19 @@
[(set_attr "type" "binary,binary")
(set_attr "length" "8,12")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (minus:DI (match_operand:DI 1 "register_operand" "0,?r")
+ (match_operator:DI 4 "comparison_operator"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (match_operand:DI 3 "arith11_operand" "rI,rI")])))]
+ "TARGET_64BIT"
+ "@
+ cmp%I3clr,*%B4 %3,%2,%%r0\;addi -1,%0,%0
+ cmp%I3clr,*%B4 %3,%2,%%r0\;addi,tr -1,%1,%0\;copy %1,%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "length" "8,12")])
+
; Patterns for max and min. (There is no need for an earlyclobber in the
; last alternative since the middle alternative will match if op0 == op1.)
@@ -796,6 +962,18 @@
[(set_attr "type" "multi,multi,multi")
(set_attr "length" "8,8,8")])
+(define_insn "smindi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (smin:DI (match_operand:DI 1 "register_operand" "%0,0,r")
+ (match_operand:DI 2 "arith11_operand" "r,I,M")))]
+ "TARGET_64BIT"
+ "@
+ cmpclr,*> %2,%0,%%r0\;copy %2,%0
+ cmpiclr,*> %2,%0,%%r0\;ldi %2,%0
+ cmpclr,*> %1,%r2,%0\;copy %1,%0"
+[(set_attr "type" "multi,multi,multi")
+ (set_attr "length" "8,8,8")])
+
(define_insn "uminsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(umin:SI (match_operand:SI 1 "register_operand" "%0,0")
@@ -807,6 +985,17 @@
[(set_attr "type" "multi,multi")
(set_attr "length" "8,8")])
+(define_insn "umindi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (umin:DI (match_operand:DI 1 "register_operand" "%0,0")
+ (match_operand:DI 2 "arith11_operand" "r,I")))]
+ "TARGET_64BIT"
+ "@
+ cmpclr,*>> %2,%0,%%r0\;copy %2,%0
+ cmpiclr,*>> %2,%0,%%r0\;ldi %2,%0"
+[(set_attr "type" "multi,multi")
+ (set_attr "length" "8,8")])
+
(define_insn "smaxsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(smax:SI (match_operand:SI 1 "register_operand" "%0,0,r")
@@ -819,6 +1008,18 @@
[(set_attr "type" "multi,multi,multi")
(set_attr "length" "8,8,8")])
+(define_insn "smaxdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (smax:DI (match_operand:DI 1 "register_operand" "%0,0,r")
+ (match_operand:DI 2 "arith11_operand" "r,I,M")))]
+ "TARGET_64BIT"
+ "@
+ cmpclr,*< %2,%0,%%r0\;copy %2,%0
+ cmpiclr,*< %2,%0,%%r0\;ldi %2,%0
+ cmpclr,*< %1,%r2,%0\;copy %1,%0"
+[(set_attr "type" "multi,multi,multi")
+ (set_attr "length" "8,8,8")])
+
(define_insn "umaxsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(umax:SI (match_operand:SI 1 "register_operand" "%0,0")
@@ -830,6 +1031,17 @@
[(set_attr "type" "multi,multi")
(set_attr "length" "8,8")])
+(define_insn "umaxdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (umax:DI (match_operand:DI 1 "register_operand" "%0,0")
+ (match_operand:DI 2 "arith11_operand" "r,I")))]
+ "TARGET_64BIT"
+ "@
+ cmpclr,*<< %2,%0,%%r0\;copy %2,%0
+ cmpiclr,*<< %2,%0,%%r0\;ldi %2,%0"
+[(set_attr "type" "multi,multi")
+ (set_attr "length" "8,8")])
+
(define_insn "abssi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(abs:SI (match_operand:SI 1 "register_operand" "r")))]
@@ -838,6 +1050,14 @@
[(set_attr "type" "multi")
(set_attr "length" "8")])
+(define_insn "absdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (abs:DI (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "or,*>= %%r0,%1,%0\;subi 0,%0,%0"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
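The absdi2 sequence copies the operand and conditionally negates it, again via nullification. A model of the net effect (illustrative):

    static long long
    absdi2_model (long long src)
    {
      long long dst = src;  /* or %r0,%1,%0                      */
      if (!(dst >= 0))      /* ,*>= nullifies the following subi */
        dst = 0 - dst;      /* subi 0,%0,%0                      */
      return dst;
    }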
;;; Experimental conditional move patterns
(define_expand "movsicc"
@@ -856,6 +1076,10 @@
if (hppa_branch_type != CMP_SI)
FAIL;
+ if (GET_MODE (hppa_compare_op0) != GET_MODE (hppa_compare_op1)
+ || GET_MODE (hppa_compare_op0) != GET_MODE (operands[0]))
+ FAIL;
+
/* operands[1] is currently the result of compare_from_rtx. We want to
emit a compare of the original operands. */
operands[1] = gen_rtx_fmt_ee (code, SImode, hppa_compare_op0, hppa_compare_op1);
@@ -909,6 +1133,74 @@
[(set_attr "type" "multi,multi,multi,nullshift,multi,multi,multi,nullshift")
(set_attr "length" "8,8,8,8,8,8,8,8")])
+(define_expand "movdicc"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI
+ (match_operator 1 "comparison_operator"
+ [(match_dup 4)
+ (match_dup 5)])
+ (match_operand:DI 2 "reg_or_cint_move_operand" "")
+ (match_operand:DI 3 "reg_or_cint_move_operand" "")))]
+ "TARGET_64BIT"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (hppa_branch_type != CMP_SI)
+ FAIL;
+
+ if (GET_MODE (hppa_compare_op0) != GET_MODE (hppa_compare_op1)
+ || GET_MODE (hppa_compare_op0) != GET_MODE (operands[0]))
+ FAIL;
+
+ /* operands[1] is currently the result of compare_from_rtx. We want to
+ emit a compare of the original operands. */
+ operands[1] = gen_rtx_fmt_ee (code, DImode, hppa_compare_op0, hppa_compare_op1);
+ operands[4] = hppa_compare_op0;
+ operands[5] = hppa_compare_op1;
+}")
+
+; We need the first constraint alternative in order to avoid
+; earlyclobbers on all other alternatives.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r")
+ (if_then_else:DI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:DI 3 "register_operand" "r,r,r,r,r")
+ (match_operand:DI 4 "arith11_operand" "rI,rI,rI,rI,rI")])
+ (match_operand:DI 1 "reg_or_cint_move_operand" "0,r,J,N,K")
+ (const_int 0)))]
+ "TARGET_64BIT"
+ "@
+ cmp%I4clr,*%S5 %4,%3,%%r0\;ldi 0,%0
+ cmp%I4clr,*%B5 %4,%3,%0\;copy %1,%0
+ cmp%I4clr,*%B5 %4,%3,%0\;ldi %1,%0
+ cmp%I4clr,*%B5 %4,%3,%0\;ldil L'%1,%0
+ cmp%I4clr,*%B5 %4,%3,%0\;depdi,z %z1,%0"
+ [(set_attr "type" "multi,multi,multi,multi,nullshift")
+ (set_attr "length" "8,8,8,8,8")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r,r,r")
+ (if_then_else:DI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:DI 3 "register_operand" "r,r,r,r,r,r,r,r")
+ (match_operand:DI 4 "arith11_operand" "rI,rI,rI,rI,rI,rI,rI,rI")])
+ (match_operand:DI 1 "reg_or_cint_move_operand" "0,0,0,0,r,J,N,K")
+ (match_operand:DI 2 "reg_or_cint_move_operand" "r,J,N,K,0,0,0,0")))]
+ "TARGET_64BIT"
+ "@
+ cmp%I4clr,*%S5 %4,%3,%%r0\;copy %2,%0
+ cmp%I4clr,*%S5 %4,%3,%%r0\;ldi %2,%0
+ cmp%I4clr,*%S5 %4,%3,%%r0\;ldil L'%2,%0
+ cmp%I4clr,*%S5 %4,%3,%%r0\;depdi,z %z2,%0
+ cmp%I4clr,*%B5 %4,%3,%%r0\;copy %1,%0
+ cmp%I4clr,*%B5 %4,%3,%%r0\;ldi %1,%0
+ cmp%I4clr,*%B5 %4,%3,%%r0\;ldil L'%1,%0
+ cmp%I4clr,*%B5 %4,%3,%%r0\;depdi,z %z1,%0"
+ [(set_attr "type" "multi,multi,multi,nullshift,multi,multi,multi,nullshift")
+ (set_attr "length" "8,8,8,8,8,8,8,8")])
+
;; Conditional Branches
(define_expand "beq"
@@ -1136,6 +1428,113 @@
(const_int 20)]
(const_int 28)))])
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 3 "comparison_operator"
+ [(match_operand:DI 1 "reg_or_0_operand" "rM")
+ (match_operand:DI 2 "reg_or_0_operand" "rM")])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 0, insn);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int 262100))
+ (const_int 8)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
+
+;; Match the negated branch.
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 3 "comparison_operator"
+ [(match_operand:DI 1 "reg_or_0_operand" "rM")
+ (match_operand:DI 2 "reg_or_0_operand" "rM")])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 1, insn);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int 262100))
+ (const_int 8)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 3 "cmpib_comparison_operator"
+ [(match_operand:DI 1 "reg_or_0_operand" "rM")
+ (match_operand:DI 2 "arith5_operand" "rL")])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 0, insn);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int 262100))
+ (const_int 8)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
+
+;; Match the negated branch.
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 3 "cmpib_comparison_operator"
+ [(match_operand:DI 1 "reg_or_0_operand" "rM")
+ (match_operand:DI 2 "arith5_operand" "rL")])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 1, insn);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int 262100))
+ (const_int 8)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
+
;; Branch on Bit patterns.
(define_insn ""
[(set (pc)
@@ -1162,6 +1561,28 @@
(define_insn ""
[(set (pc)
(if_then_else
+ (ne (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:DI 1 "uint32_operand" ""))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 0, insn, 0);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (const_int 8)))])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
(ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
(const_int 1)
(match_operand:SI 1 "uint5_operand" ""))
@@ -1184,6 +1605,28 @@
(define_insn ""
[(set (pc)
(if_then_else
+ (ne (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:DI 1 "uint32_operand" ""))
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 1, insn, 0);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (const_int 8)))])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
(eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
(const_int 1)
(match_operand:SI 1 "uint5_operand" ""))
@@ -1206,6 +1649,28 @@
(define_insn ""
[(set (pc)
(if_then_else
+ (eq (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:DI 1 "uint32_operand" ""))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 0, insn, 1);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (const_int 8)))])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
(eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
(const_int 1)
(match_operand:SI 1 "uint5_operand" ""))
@@ -1225,6 +1690,28 @@
(const_int 4)
(const_int 8)))])
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:DI 1 "uint32_operand" ""))
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 1, insn, 1);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (const_int 8)))])
+
;; Branch on Variable Bit patterns.
(define_insn ""
[(set (pc)
@@ -1251,6 +1738,28 @@
(define_insn ""
[(set (pc)
(if_then_else
+ (ne (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:DI 1 "register_operand" "q"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 0, insn, 0);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (const_int 8)))])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
(ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
(const_int 1)
(match_operand:SI 1 "register_operand" "q"))
@@ -1273,6 +1782,28 @@
(define_insn ""
[(set (pc)
(if_then_else
+ (ne (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:DI 1 "register_operand" "q"))
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 1, insn, 0);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (const_int 8)))])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
(eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
(const_int 1)
(match_operand:SI 1 "register_operand" "q"))
@@ -1295,6 +1826,28 @@
(define_insn ""
[(set (pc)
(if_then_else
+ (eq (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:DI 1 "register_operand" "q"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 0, insn, 1);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (const_int 8)))])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
(eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
(const_int 1)
(match_operand:SI 1 "register_operand" "q"))
@@ -1314,6 +1867,28 @@
(const_int 4)
(const_int 8)))])
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:DI 1 "register_operand" "q"))
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ "TARGET_64BIT"
+ "*
+{
+ return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
+ get_attr_length (insn), 1, insn, 1);
+}"
+[(set_attr "type" "cbranch")
+ (set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int 8184))
+ (const_int 4)
+ (const_int 8)))])
+
;; Floating point branches
(define_insn ""
[(set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
@@ -1493,6 +2068,11 @@
""
"
{
+ if (TARGET_64BIT)
+ {
+ emit_insn (gen_pre_ldd (operands[0], operands[1], operands[2]));
+ DONE;
+ }
emit_insn (gen_pre_ldw (operands[0], operands[1], operands[2]));
DONE;
}")
@@ -1513,6 +2093,17 @@
[(set_attr "type" "load")
(set_attr "length" "4")])
+(define_insn "pre_ldd"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mem:DI (plus:DI (match_operand:DI 1 "register_operand" "+r")
+ (match_operand:DI 2 "pre_cint_operand" ""))))
+ (set (match_dup 1)
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT"
+ "ldd,mb %2(%1),%0"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
(define_insn ""
[(set (mem:SI (plus:SI (match_operand:SI 0 "register_operand" "+r")
(match_operand:SI 1 "pre_cint_operand" "")))
@@ -1554,6 +2145,11 @@
""
"
{
+ if (TARGET_64BIT)
+ {
+ emit_insn (gen_post_std (operands[0], operands[1], operands[2]));
+ DONE;
+ }
emit_insn (gen_post_stw (operands[0], operands[1], operands[2]));
DONE;
}")
@@ -1574,6 +2170,17 @@
[(set_attr "type" "store")
(set_attr "length" "4")])
+(define_insn "post_std"
+ [(set (mem:DI (match_operand:DI 0 "register_operand" "+r"))
+ (match_operand:DI 1 "reg_or_0_operand" "rM"))
+ (set (match_dup 0)
+ (plus:DI (match_dup 0)
+ (match_operand:DI 2 "post_cint_operand" "")))]
+ "TARGET_64BIT"
+ "std,ma %r1,%2(%0)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
;; For loading the address of a label while generating PIC code.
;; Note since this pattern can be created at reload time (via movsi), all
;; the same rules for movsi apply here. (no new pseudos, no temporaries).
@@ -1627,6 +2234,18 @@
[(set_attr "type" "binary")
(set_attr "length" "4")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (plus:DI (match_operand:DI 1 "register_operand" "r")
+ (high:DI (match_operand 2 "" ""))))]
+ "symbolic_operand (operands[2], Pmode)
+ && ! function_label_operand (operands[2], Pmode)
+ && TARGET_64BIT
+ && flag_pic == 2"
+ "addil LT'%G2,%1"
+ [(set_attr "type" "binary")
+ (set_attr "length" "4")])
+
; We need this to make sure CSE doesn't simplify a memory load with a
; symbolic address, whose content it think it knows. For PIC, what CSE
; think is the real value will be the address of that value.
@@ -1646,6 +2265,22 @@
[(set_attr "type" "load")
(set_attr "length" "4")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mem:DI
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (unspec:DI
+ [(match_operand:DI 2 "symbolic_operand" "")] 0))))]
+ "TARGET_64BIT"
+ "*
+{
+ if (flag_pic != 2)
+ abort ();
+ return \"ldd RT'%G2(%1),%0\";
+}"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
;; Always use addil rather than ldil;add sequences. This allows the
;; HP linker to eliminate the dp relocation if the symbolic operand
;; lives in the TEXT space.
@@ -1687,6 +2322,17 @@
(set_attr "length" "4,8")])
(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=!a,*r")
+ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
+ (high:DI (match_operand 2 "const_int_operand" ""))))]
+ "reload_completed && TARGET_64BIT"
+ "@
+ addil L'%G2,%1
+ ldil L'%G2,%0\;{addl|add,l} %0,%1,%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "length" "4,8")])
+
+(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
(high:SI (match_operand 1 "" "")))]
"(!flag_pic || !symbolic_operand (operands[1], Pmode))
@@ -1702,6 +2348,23 @@
(set_attr "length" "4")])
(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "const_int_operand" "")))]
+ "TARGET_64BIT"
+ "ldil L'%G1,%0";
+ [(set_attr "type" "move")
+ (set_attr "length" "4")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "const_int_operand" "i")))]
+ "TARGET_64BIT"
+ "ldo R'%G2(%1),%0";
+ [(set_attr "type" "move")
+ (set_attr "length" "4")])
+
+(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "immediate_operand" "i")))]
@@ -2149,7 +2812,7 @@
(clobber (match_dup 6))
(use (match_operand:SI 2 "arith_operand" ""))
(use (match_operand:SI 3 "const_int_operand" ""))])]
- ""
+ "!TARGET_64BIT"
"
{
int size, align;
@@ -2222,7 +2885,7 @@
(clobber (match_operand:SI 6 "register_operand" "=&r,&r")) ;item tmp2
(use (match_operand:SI 4 "arith_operand" "J,2")) ;byte count
(use (match_operand:SI 5 "const_int_operand" "n,n"))] ;alignment
- ""
+ "!TARGET_64BIT"
"* return output_block_move (operands, !which_alternative);"
[(set_attr "type" "multi,multi")])
@@ -2242,6 +2905,7 @@
(match_operand:DF 1 "" "?F,m"))]
"GET_CODE (operands[1]) == CONST_DOUBLE
&& operands[1] != CONST0_RTX (DFmode)
+ && !TARGET_64BIT
&& ! TARGET_SOFT_FLOAT"
"* return (which_alternative == 0 ? output_move_double (operands)
: \"fldd%F1 %1,%0\");"
@@ -2254,6 +2918,9 @@
""
"
{
+ if (GET_CODE (operands[1]) == CONST_DOUBLE && TARGET_64BIT)
+ operands[1] = force_const_mem (DFmode, operands[1]);
+
if (emit_move_sequence (operands, DFmode, 0))
DONE;
}")
@@ -2300,6 +2967,7 @@
|| reg_or_0_operand (operands[1], DFmode))
&& ! (GET_CODE (operands[1]) == CONST_DOUBLE
&& GET_CODE (operands[0]) == MEM)
+ && ! TARGET_64BIT
&& ! TARGET_SOFT_FLOAT"
"*
{
@@ -2318,6 +2986,7 @@
"rG,r,r,o,Q"))]
"(register_operand (operands[0], DFmode)
|| reg_or_0_operand (operands[1], DFmode))
+ && ! TARGET_64BIT
&& TARGET_SOFT_FLOAT"
"*
{
@@ -2327,6 +2996,29 @@
(set_attr "length" "8,8,16,8,16")])
(define_insn ""
+ [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand"
+ "=r,r,r,r,r,Q,*q,!f,f,*TR")
+ (match_operand:DF 1 "move_operand"
+ "r,J,N,K,RQ,rM,rM,!fM,*RT,f"))]
+ "(register_operand (operands[0], DFmode)
+ || reg_or_0_operand (operands[1], DFmode))
+ && ! TARGET_SOFT_FLOAT && TARGET_64BIT"
+ "@
+ copy %1,%0
+ ldi %1,%0
+ ldil L'%1,%0
+ depdi,z %z1,%0
+ ldd%M1 %1,%0
+ std%M0 %r1,%0
+ mtsar %r1
+ fcpy,dbl %f1,%0
+ fldd%F1 %1,%0
+ fstd%F0 %1,%0"
+ [(set_attr "type" "move,move,move,shift,load,store,move,fpalu,fpload,fpstore")
+ (set_attr "pa_combine_type" "addmove")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,4")])
+
+(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=fx")
(mem:DF (plus:SI (match_operand:SI 1 "basereg_operand" "r")
(match_operand:SI 2 "register_operand" "r"))))]
@@ -2408,6 +3100,9 @@
""
"
{
+ if (GET_CODE (operands[1]) == CONST_DOUBLE && TARGET_64BIT)
+ operands[1] = force_const_mem (DImode, operands[1]);
+
if (emit_move_sequence (operands, DImode, 0))
DONE;
}")
@@ -2445,7 +3140,7 @@
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(high:DI (match_operand 1 "" "")))]
- ""
+ "!TARGET_64BIT"
"*
{
rtx op0 = operands[0];
@@ -2487,6 +3182,7 @@
"rM,r,r,o*R,Q,i,fM,*TR,f"))]
"(register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode))
+ && ! TARGET_64BIT
&& ! TARGET_SOFT_FLOAT"
"*
{
@@ -2500,11 +3196,35 @@
(define_insn ""
[(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand"
+ "=r,r,r,r,r,Q,*q,!f,f,*TR")
+ (match_operand:DI 1 "move_operand"
+ "r,J,N,K,RQ,rM,rM,!fM,*RT,f"))]
+ "(register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))
+ && ! TARGET_SOFT_FLOAT && TARGET_64BIT"
+ "@
+ copy %1,%0
+ ldi %1,%0
+ ldil L'%1,%0
+ depdi,z %z1,%0
+ ldd%M1 %1,%0
+ std%M0 %r1,%0
+ mtsar %r1
+ fcpy,dbl %f1,%0
+ fldd%F1 %1,%0
+ fstd%F0 %1,%0"
+ [(set_attr "type" "move,move,move,shift,load,store,move,fpalu,fpload,fpstore")
+ (set_attr "pa_combine_type" "addmove")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,4")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand"
"=r,o,Q,r,r,r")
(match_operand:DI 1 "general_operand"
"rM,r,r,o,Q,i"))]
"(register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode))
+ && ! TARGET_64BIT
&& TARGET_SOFT_FLOAT"
"*
{
@@ -2517,7 +3237,7 @@
[(set (match_operand:DI 0 "register_operand" "=r,&r")
(lo_sum:DI (match_operand:DI 1 "register_operand" "0,r")
(match_operand:DI 2 "immediate_operand" "i,i")))]
- ""
+ "!TARGET_64BIT"
"*
{
/* Don't output a 64 bit constant, since we can't trust the assembler to
@@ -2766,6 +3486,30 @@
[(set_attr "type" "shift,load")
(set_attr "length" "4,4")])
+(define_insn "zero_extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:QI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "extrd,u %1,63,8,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
+(define_insn "zero_extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:HI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "extrd,u %1,63,16,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
+(define_insn "zero_extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "extrd,u %1,63,32,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
;;- sign extension instructions
(define_insn "extendhisi2"
@@ -2791,6 +3535,31 @@
"{extrs|extrw,s} %1,31,8,%0"
[(set_attr "type" "shift")
(set_attr "length" "4")])
+
+(define_insn "extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:QI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "extrd,s %1,63,8,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
+(define_insn "extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:HI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "extrd,s %1,63,16,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "extrd,s %1,63,32,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
;; Conversions between float and double.
@@ -3014,15 +3783,23 @@
(define_expand "adddi3"
[(set (match_operand:DI 0 "register_operand" "")
(plus:DI (match_operand:DI 1 "register_operand" "")
- (match_operand:DI 2 "arith11_operand" "")))]
+ (match_operand:DI 2 "arith_operand" "")))]
""
"")
+;; We allow arith_operand for operand 2, even though strictly speaking
+;; we would prefer to use arith11_operand, since that is what the
+;; hardware can actually support.
+;;
+;; But the price of the extra reload in that case is worth paying for
+;; the simplicity we get by allowing a trivial adddi3 expander to be
+;; used for both PA64 and PA32.
+
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(plus:DI (match_operand:DI 1 "register_operand" "%r")
- (match_operand:DI 2 "arith11_operand" "rI")))]
- ""
+ (match_operand:DI 2 "arith_operand" "rI")))]
+ "!TARGET_64BIT"
"*
{
if (GET_CODE (operands[2]) == CONST_INT)
@@ -3039,6 +3816,27 @@
(set_attr "length" "8")])
(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (plus:DI (match_operand:DI 1 "register_operand" "%r,r")
+ (match_operand:DI 2 "arith_operand" "r,J")))]
+ "TARGET_64BIT"
+ "@
+ {addl|add,l} %1,%2,%0
+ ldo %2(%1),%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "pa_combine_type" "addmove")
+ (set_attr "length" "4,4")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "uaddcm %2,%1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "4")])
+
+(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "register_operand" "r")))]
@@ -3135,11 +3933,23 @@
[(set (match_operand:DI 0 "register_operand" "=r")
(minus:DI (match_operand:DI 1 "register_operand" "r")
(match_operand:DI 2 "register_operand" "r")))]
- ""
+ "!TARGET_64BIT"
"sub %R1,%R2,%R0\;{subb|sub,b} %1,%2,%0"
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r,q")
+ (minus:DI (match_operand:DI 1 "arith11_operand" "r,I,U")
+ (match_operand:DI 2 "register_operand" "r,r,r")))]
+ "TARGET_64BIT"
+ "@
+ sub %1,%2,%0
+ subi %1,%2,%0
+ mtsarcm %2"
+ [(set_attr "type" "binary,binary,move")
+ (set_attr "length" "4,4,4")])
+
(define_expand "subsi3"
[(set (match_operand:SI 0 "register_operand" "")
(minus:SI (match_operand:SI 1 "arith11_operand" "")
@@ -3193,6 +4003,16 @@
operands[1] = force_reg (SImode, operands[1]);
operands[2] = force_reg (SImode, operands[2]);
emit_insn (gen_umulsidi3 (scratch, operands[1], operands[2]));
+ /* We do not want (subreg:SI (XX:DI) 1) for TARGET_64BIT since
+ that has no real meaning. */
+ if (TARGET_64BIT)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operands[0],
+ gen_rtx_SUBREG (SImode, scratch, 0)));
+ DONE;
+ }
emit_insn (gen_rtx_SET (VOIDmode, operands[0],
gen_rtx_SUBREG (SImode, scratch, 1)));
DONE;
@@ -3213,12 +4033,21 @@
[(set (match_operand:DI 0 "nonimmediate_operand" "=f")
(mult:DI (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "f"))
(match_operand:DI 2 "uint32_operand" "f")))]
- "TARGET_PA_11 && ! TARGET_DISABLE_FPREGS && ! TARGET_SOFT_FLOAT"
+ "TARGET_PA_11 && ! TARGET_DISABLE_FPREGS && ! TARGET_SOFT_FLOAT && !TARGET_64BIT"
"xmpyu %1,%R2,%0"
[(set_attr "type" "fpmuldbl")
(set_attr "length" "4")])
(define_insn ""
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=f")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "f"))
+ (match_operand:DI 2 "uint32_operand" "f")))]
+ "TARGET_PA_11 && ! TARGET_DISABLE_FPREGS && ! TARGET_SOFT_FLOAT && TARGET_64BIT"
+ "xmpyu %1,%2R,%0"
+ [(set_attr "type" "fpmuldbl")
+ (set_attr "length" "4")])
+
+(define_insn ""
[(set (reg:SI 29) (mult:SI (reg:SI 26) (reg:SI 25)))
(clobber (match_operand:SI 0 "register_operand" "=a"))
(clobber (reg:SI 26))
@@ -3252,6 +4081,54 @@
;; Out of range and either PIC or PORTABLE_RUNTIME
(const_int 24)))])
+(define_expand "muldi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))]
+ "TARGET_64BIT && ! TARGET_DISABLE_FPREGS && ! TARGET_SOFT_FLOAT"
+ "
+{
+ rtx low_product = gen_reg_rtx (DImode);
+ rtx cross_product1 = gen_reg_rtx (DImode);
+ rtx cross_product2 = gen_reg_rtx (DImode);
+ rtx cross_scratch = gen_reg_rtx (DImode);
+ rtx cross_product = gen_reg_rtx (DImode);
+ rtx op1l, op1r, op2l, op2r;
+ rtx op1shifted, op2shifted;
+
+ op1shifted = gen_reg_rtx (DImode);
+ op2shifted = gen_reg_rtx (DImode);
+ op1l = gen_reg_rtx (SImode);
+ op1r = gen_reg_rtx (SImode);
+ op2l = gen_reg_rtx (SImode);
+ op2r = gen_reg_rtx (SImode);
+
+ emit_move_insn (op1shifted, gen_rtx_LSHIFTRT (DImode, operands[1],
+ GEN_INT (32)));
+ emit_move_insn (op2shifted, gen_rtx_LSHIFTRT (DImode, operands[2],
+ GEN_INT (32)));
+ op1r = gen_rtx_SUBREG (SImode, operands[1], 0);
+ op2r = gen_rtx_SUBREG (SImode, operands[2], 0);
+ op1l = gen_rtx_SUBREG (SImode, op1shifted, 0);
+ op2l = gen_rtx_SUBREG (SImode, op2shifted, 0);
+
+ /* Emit multiplies for the cross products. */
+ emit_insn (gen_umulsidi3 (cross_product1, op2r, op1l));
+ emit_insn (gen_umulsidi3 (cross_product2, op2l, op1r));
+
+ /* Emit a multiply for the low sub-word. */
+ emit_insn (gen_umulsidi3 (low_product, op2r, op1r));
+
+ /* Sum the cross products and shift them into proper position. */
+ emit_insn (gen_adddi3 (cross_scratch, cross_product1, cross_product2));
+ emit_insn (gen_ashldi3 (cross_product, cross_scratch, GEN_INT (32)));
+
+ /* Add the cross product to the low product and store the result
+ into the output operand.  */
+ emit_insn (gen_adddi3 (operands[0], cross_product, low_product));
+ DONE;
+}")
+
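The expander synthesizes a 64x64 multiply from the 32x32->64 umulsidi3 (xmpyu) primitive. A C model of the decomposition it performs (illustrative; the high*high partial product falls entirely outside the low 64 bits):

    #include <stdint.h>

    /* With a = ah*2^32 + al and b = bh*2^32 + bl:
       a*b mod 2^64 == al*bl + ((ah*bl + al*bh) << 32).  */
    static uint64_t
    muldi3_model (uint64_t a, uint64_t b)
    {
      uint32_t al = (uint32_t) a, ah = (uint32_t) (a >> 32);
      uint32_t bl = (uint32_t) b, bh = (uint32_t) (b >> 32);
      uint64_t low   = (uint64_t) al * bl;                  /* low sub-word */
      uint64_t cross = (uint64_t) ah * bl + (uint64_t) al * bh;
      return low + (cross << 32);
    }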
;;; Division and mod.
(define_expand "divsi3"
[(set (reg:SI 26) (match_operand:SI 1 "move_operand" ""))
@@ -3268,6 +4145,8 @@
{
operands[3] = gen_reg_rtx (SImode);
operands[4] = gen_reg_rtx (SImode);
+ if (TARGET_64BIT)
+ operands[4] = gen_rtx_REG (SImode, 2);
if (GET_CODE (operands[2]) == CONST_INT && emit_hpdiv_const (operands, 0))
DONE;
}")
@@ -3324,6 +4203,8 @@
{
operands[3] = gen_reg_rtx (SImode);
operands[4] = gen_reg_rtx (SImode);
+ if (TARGET_64BIT)
+ operands[4] = gen_rtx_REG (SImode, 2);
if (GET_CODE (operands[2]) == CONST_INT && emit_hpdiv_const (operands, 1))
DONE;
}")
@@ -3379,6 +4260,8 @@
"
{
operands[4] = gen_reg_rtx (SImode);
+ if (TARGET_64BIT)
+ operands[4] = gen_rtx_REG (SImode, 2);
operands[3] = gen_reg_rtx (SImode);
}")
@@ -3432,6 +4315,8 @@
"
{
operands[4] = gen_reg_rtx (SImode);
+ if (TARGET_64BIT)
+ operands[4] = gen_rtx_REG (SImode, 2);
operands[3] = gen_reg_rtx (SImode);
}")
@@ -3492,11 +4377,20 @@
[(set (match_operand:DI 0 "register_operand" "=r")
(and:DI (match_operand:DI 1 "register_operand" "%r")
(match_operand:DI 2 "register_operand" "r")))]
- ""
+ "!TARGET_64BIT"
"and %1,%2,%0\;and %R1,%R2,%R0"
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (and:DI (match_operand:DI 1 "register_operand" "%?r,0")
+ (match_operand:DI 2 "and_operand" "rO,P")))]
+ "TARGET_64BIT"
+ "* return output_64bit_and (operands); "
+ [(set_attr "type" "binary")
+ (set_attr "length" "4")])
+
; The ? for op1 makes reload prefer zdepi instead of loading a huge
; constant with ldil;ldo.
(define_insn "andsi3"
@@ -3512,12 +4406,21 @@
[(set (match_operand:DI 0 "register_operand" "=r")
(and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
(match_operand:DI 2 "register_operand" "r")))]
- ""
+ "!TARGET_64BIT"
"andcm %2,%1,%0\;andcm %R2,%R1,%R0"
[(set_attr "type" "binary")
(set_attr "length" "8")])
(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "andcm %2,%1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "4")])
+
+(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
(and:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "register_operand" "r")))]
@@ -3543,11 +4446,29 @@
[(set (match_operand:DI 0 "register_operand" "=r")
(ior:DI (match_operand:DI 1 "register_operand" "%r")
(match_operand:DI 2 "register_operand" "r")))]
- ""
+ "!TARGET_64BIT"
"or %1,%2,%0\;or %R1,%R2,%R0"
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (ior:DI (match_operand:DI 1 "register_operand" "0,0")
+ (match_operand:DI 2 "ior_operand" "M,i")))]
+ "TARGET_64BIT"
+ "* return output_64bit_ior (operands); "
+ [(set_attr "type" "binary,shift")
+ (set_attr "length" "4,4")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (match_operand:DI 1 "register_operand" "%r")
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "or %1,%2,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "4")])
+
;; Need a define_expand because we've run out of CONST_OK... characters.
(define_expand "iorsi3"
[(set (match_operand:SI 0 "register_operand" "")
@@ -3596,11 +4517,20 @@
[(set (match_operand:DI 0 "register_operand" "=r")
(xor:DI (match_operand:DI 1 "register_operand" "%r")
(match_operand:DI 2 "register_operand" "r")))]
- ""
+ "!TARGET_64BIT"
"xor %1,%2,%0\;xor %R1,%R2,%R0"
[(set_attr "type" "binary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (xor:DI (match_operand:DI 1 "register_operand" "%r")
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "xor %1,%2,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "4")])
+
(define_insn "xorsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(xor:SI (match_operand:SI 1 "register_operand" "%r")
@@ -3619,11 +4549,19 @@
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(neg:DI (match_operand:DI 1 "register_operand" "r")))]
- ""
+ "!TARGET_64BIT"
"sub %%r0,%R1,%R0\;{subb|sub,b} %%r0,%1,%0"
[(set_attr "type" "unary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "sub %%r0,%1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "4")])
+
(define_insn "negsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_operand:SI 1 "register_operand" "r")))]
@@ -3645,11 +4583,19 @@
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(not:DI (match_operand:DI 1 "register_operand" "r")))]
- ""
+ "!TARGET_64BIT"
"uaddcm %%r0,%1,%0\;uaddcm %%r0,%R1,%R0"
[(set_attr "type" "unary")
(set_attr "length" "8")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "uaddcm %%r0,%1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "4")])
+
(define_insn "one_cmplsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (match_operand:SI 1 "register_operand" "r")))]
@@ -4210,6 +5156,16 @@
[(set_attr "type" "binary")
(set_attr "length" "4")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (mult:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "shadd_operand" ""))
+ (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "shladd,l %2,%O3,%1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "4")])
+
;; This anonymous pattern and splitter wins because it reduces the latency
;; of the shadd sequence without increasing the latency of the shift.
;;
@@ -4250,6 +5206,33 @@
(match_dup 1)))]
"")
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (mult:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "shadd_operand" ""))
+ (match_operand:DI 1 "register_operand" "r")))
+ (set (match_operand:DI 4 "register_operand" "=&r")
+ (ashift:DI (match_dup 2)
+ (match_operand:DI 5 "const_int_operand" "i")))]
+ "TARGET_64BIT && INTVAL (operands[5]) == exact_log2 (INTVAL (operands[3]))"
+ "#"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (mult:DI (match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "shadd_operand" ""))
+ (match_operand:DI 1 "register_operand" "r")))
+ (set (match_operand:DI 4 "register_operand" "=&r")
+ (ashift:DI (match_dup 2)
+ (match_operand:DI 5 "const_int_operand" "i")))]
+ "TARGET_64BIT && INTVAL (operands[5]) == exact_log2 (INTVAL (operands[3]))"
+ [(set (match_dup 4) (ashift:DI (match_dup 2) (match_dup 5)))
+ (set (match_dup 0) (plus:DI (mult:DI (match_dup 2) (match_dup 3))
+ (match_dup 1)))]
+ "")
+
(define_expand "ashlsi3"
[(set (match_operand:SI 0 "register_operand" "")
(ashift:SI (match_operand:SI 1 "lhs_lshift_operand" "")
@@ -4349,6 +5332,105 @@
[(set_attr "type" "shift")
(set_attr "length" "4")])
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "lhs_lshift_operand" "")
+ (match_operand:DI 2 "arith32_operand" "")))]
+ "TARGET_64BIT"
+ "
+{
+ if (GET_CODE (operands[2]) != CONST_INT)
+ {
+ rtx temp = gen_reg_rtx (DImode);
+ emit_insn (gen_subdi3 (temp, GEN_INT (63), operands[2]));
+ if (GET_CODE (operands[1]) == CONST_INT)
+ emit_insn (gen_zvdep_imm64 (operands[0], operands[1], temp));
+ else
+ emit_insn (gen_zvdep64 (operands[0], operands[1], temp));
+ DONE;
+ }
+ /* Make sure both inputs are not constants,
+ there are no patterns for that. */
+ operands[1] = force_reg (DImode, operands[1]);
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "const_int_operand" "n")))]
+ "TARGET_64BIT"
+ "depd,z %1,%p2,%Q2,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
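For a constant left shift by n, depd,z deposits the low 64-n bits of the source ending at (left-numbered) bit 63-n. The %p2 and %Q2 operand codes presumably print those two fields; this is an inference from the instruction semantics, not a reading of print_operand:

    /* Presumed fields printed for "depd,z %1,%p2,%Q2,%0" with count n.  */
    static void
    depd_z_fields (int n, int *pos, int *len)
    {
      *pos = 63 - (n & 63);  /* %p: deposit position */
      *len = 64 - (n & 63);  /* %Q: field length     */
    }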
+; Match cases of op1 a CONST_INT here that zvdep_imm64 doesn't handle.
+; Doing it like this makes slightly better code since reload can
+; replace a register with a known value in range -16..15 with a
+; constant. Ideally, we would like to merge zvdep64 and zvdep_imm64,
+; but since we have no more CONST_OK... characters, that is not
+; possible.
+(define_insn "zvdep64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (ashift:DI (match_operand:DI 1 "arith5_operand" "r,L")
+ (minus:DI (const_int 63)
+ (match_operand:DI 2 "register_operand" "q,q"))))]
+ "TARGET_64BIT"
+ "@
+ depd,z %1,%%sar,64,%0
+ depdi,z %1,%%sar,64,%0"
+ [(set_attr "type" "shift,shift")
+ (set_attr "length" "4,4")])
+
+(define_insn "zvdep_imm64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "lhs_lshift_cint_operand" "")
+ (minus:DI (const_int 63)
+ (match_operand:DI 2 "register_operand" "q"))))]
+ "TARGET_64BIT"
+ "*
+{
+  /* Reduce the constant to the immediate form depdi,z expects and
+     compute the matching deposit length.  */
+  int x = INTVAL (operands[1]);
+  operands[2] = GEN_INT (4 + exact_log2 ((x >> 4) + 1));
+  operands[1] = GEN_INT ((x & 0x1f) - 0x20);
+ return \"depdi,z %1,%%sar,%2,%0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (ashift:DI (match_operand:DI 1 "const_int_operand" "")
+ (minus:DI (const_int 63)
+ (match_operand:DI 2 "register_operand" "q")))
+ (match_operand:DI 3 "register_operand" "0")))]
+  ; Accept constants of the form ...0001...1; can this be generalized?
+ "TARGET_64BIT && exact_log2 (INTVAL (operands[1]) + 1) >= 0"
+ "*
+{
+ int x = INTVAL (operands[1]);
+ operands[2] = GEN_INT (exact_log2 (x + 1));
+ return \"depdi -1,%%sar,%2,%0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (rotate:DI (match_operand:DI 1 "const_int_operand" "")
+ (minus:DI (const_int 63)
+ (match_operand:DI 2 "register_operand" "q")))
+ (match_operand:DI 3 "register_operand" "0")))]
+  ; This can be generalized further.
+ "TARGET_64BIT && INTVAL (operands[1]) == -2"
+ "*
+{
+ int x = INTVAL (operands[1]);
+ operands[2] = GEN_INT (exact_log2 ((~x) + 1));
+ return \"depdi 0,%%sar,%2,%0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
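The two anonymous patterns above fold an OR with a shifted all-ones mask,
and an AND with a rotated -2, into single variable-position deposits.
In rough C terms (LSB-relative positions, our own sketch):

    /* The ior pattern, for op1 == 2**k - 1: set a k-bit field at a
       variable position n (SAR holds 63 - n).  */
    unsigned long long
    set_bits (unsigned long long x, unsigned int k, unsigned int n)
    {
      return x | (((1ULL << k) - 1) << n);   /* depdi -1,%%sar,k,%0 */
    }

    /* The and pattern, for op1 == -2: clear the single bit at
       position n.  */
    unsigned long long
    clear_bit (unsigned long long x, unsigned int n)
    {
      return x & ~(1ULL << n);               /* depdi 0,%%sar,1,%0 */
    }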
(define_expand "ashrsi3"
[(set (match_operand:SI 0 "register_operand" "")
(ashiftrt:SI (match_operand:SI 1 "register_operand" "")
@@ -4384,6 +5466,41 @@
[(set_attr "type" "shift")
(set_attr "length" "4")])
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "arith32_operand" "")))]
+ "TARGET_64BIT"
+ "
+{
+ if (GET_CODE (operands[2]) != CONST_INT)
+ {
+ rtx temp = gen_reg_rtx (DImode);
+ emit_insn (gen_subdi3 (temp, GEN_INT (63), operands[2]));
+ emit_insn (gen_vextrs64 (operands[0], operands[1], temp));
+ DONE;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "const_int_operand" "n")))]
+ "TARGET_64BIT"
+ "extrd,s %1,%p2,%Q2,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
+(define_insn "vextrs64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (minus:DI (const_int 63)
+ (match_operand:DI 2 "register_operand" "q"))))]
+ "TARGET_64BIT"
+ "extrd,s %1,%%sar,64,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
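The variable-count arithmetic shift goes through the same SAR setup as
ashldi3. As a sketch:

    /* vextrs64: with SAR = 63 - n, "extrd,s %1,%%sar,64" extracts a
       sign-extended field, i.e. an arithmetic right shift by n.  */
    long long
    ashrdi3_model (long long x, unsigned int n)
    {
      return x >> (n & 63);   /* implementation-defined for negative x
                                 in ISO C; extrd,s is arithmetic by
                                 definition */
    }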
(define_insn "lshrsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(lshiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
@@ -4395,6 +5512,17 @@
[(set_attr "type" "shift")
(set_attr "length" "4")])
+(define_insn "lshrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r,r")
+ (match_operand:DI 2 "arith32_operand" "q,n")))]
+ "TARGET_64BIT"
+ "@
+ shrpd %%r0,%1,%%sar,%0
+ extrd,u %1,%p2,%Q2,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
(define_insn "rotrsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(rotatert:SI (match_operand:SI 1 "register_operand" "r,r")
@@ -4517,6 +5645,15 @@
""
"hppa_expand_prologue ();DONE;")
+(define_expand "sibcall_epilogue"
+ [(return)]
+ ""
+ "
+{
+ hppa_expand_epilogue ();
+ DONE;
+}")
+
(define_expand "epilogue"
[(return)]
""
@@ -4660,6 +5797,16 @@
operands[0] = reg;
}
+  /* In 64bit mode we must wipe the upper bits of the register, in case
+     the addition overflowed or the high part held random bits.  */
+ if (TARGET_64BIT)
+ {
+ rtx reg = gen_reg_rtx (DImode);
+ emit_insn (gen_extendsidi2 (reg, operands[0]));
+ operands[0] = gen_rtx_SUBREG (SImode, reg, 0);
+ }
+
if (!INT_5_BITS (operands[2]))
operands[2] = force_reg (SImode, operands[2]);
@@ -4704,6 +5851,11 @@
else
op = XEXP (operands[0], 0);
+ if (TARGET_64BIT)
+ emit_move_insn (arg_pointer_rtx,
+ gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
+ GEN_INT (64)));
+
/* Use two different patterns for calls to explicitly named functions
and calls through function pointers. This is necessary as these two
types of calls use different calling conventions, and CSE might try
@@ -4711,6 +5863,12 @@
two patterns keeps CSE from performing this optimization). */
if (GET_CODE (op) == SYMBOL_REF)
call_insn = emit_call_insn (gen_call_internal_symref (op, operands[1]));
+ else if (TARGET_64BIT)
+ {
+ rtx tmpreg = force_reg (word_mode, op);
+ call_insn = emit_call_insn (gen_call_internal_reg_64bit (tmpreg,
+ operands[1]));
+ }
else
{
rtx tmpreg = gen_rtx_REG (word_mode, 22);
@@ -4723,6 +5881,8 @@
use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), pic_offset_table_rtx);
use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM_SAVED));
+ if (TARGET_64BIT)
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), arg_pointer_rtx);
/* After each call we must restore the PIC register, even if it
doesn't appear to be used.
@@ -4744,7 +5904,7 @@
"*
{
output_arg_descriptor (insn);
- return output_call (insn, operands[0]);
+ return output_call (insn, operands[0], 0);
}"
[(set_attr "type" "call")
(set (attr "length")
@@ -4762,6 +5922,23 @@
(const_int 52)
(const_int 68))))])
+(define_insn "call_internal_reg_64bit"
+ [(call (mem:SI (match_operand:DI 0 "register_operand" "r"))
+ (match_operand 1 "" "i"))
+ (clobber (reg:SI 2))
+ (use (const_int 1))]
+ "TARGET_64BIT"
+ "*
+{
+ /* ??? Needs more work. Length computation, split into multiple insns,
+ do not use %r22 directly, expose delay slot. */
+ return \"ldd 16(%0),%%r2\;ldd 24(%0),%%r27\;bve,l (%%r2),%%r2\;nop\";
+}"
+ [(set_attr "type" "dyncall")
+ (set (attr "length") (const_int 16))])
+
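The two ldd offsets in this sequence come from the PA64 function
descriptor: the code address lives at offset 16 and the callee's global
pointer at offset 24. A hypothetical C view of the layout the loads
assume (struct and field names are ours, not from the patch):

    struct pa64_function_descriptor
    {
      unsigned long long reserved[2];  /* bytes 0-15 */
      unsigned long long entry_point;  /* byte 16: ldd into %r2, bve,l target */
      unsigned long long global_ptr;   /* byte 24: ldd into %r27 (gp) */
    };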
(define_insn "call_internal_reg"
[(call (mem:SI (reg:SI 22))
(match_operand 0 "" "i"))
@@ -4850,6 +6027,11 @@
else
op = XEXP (operands[1], 0);
+ if (TARGET_64BIT)
+ emit_move_insn (arg_pointer_rtx,
+ gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
+ GEN_INT (64)));
+
/* Use two different patterns for calls to explicitly named functions
and calls through function pointers. This is necessary as these two
types of calls use different calling conventions, and CSE might try
@@ -4859,6 +6041,14 @@
call_insn = emit_call_insn (gen_call_value_internal_symref (operands[0],
op,
operands[2]));
+ else if (TARGET_64BIT)
+ {
+ rtx tmpreg = force_reg (word_mode, op);
+ call_insn
+ = emit_call_insn (gen_call_value_internal_reg_64bit (operands[0],
+ tmpreg,
+ operands[2]));
+ }
else
{
rtx tmpreg = gen_rtx_REG (word_mode, 22);
@@ -4871,6 +6061,8 @@
use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), pic_offset_table_rtx);
use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM_SAVED));
+ if (TARGET_64BIT)
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), arg_pointer_rtx);
/* After each call we must restore the PIC register, even if it
doesn't appear to be used.
@@ -4894,7 +6086,7 @@
"*
{
output_arg_descriptor (insn);
- return output_call (insn, operands[1]);
+ return output_call (insn, operands[1], 0);
}"
[(set_attr "type" "call")
(set (attr "length")
@@ -4912,6 +6104,22 @@
(const_int 52)
(const_int 68))))])
+(define_insn "call_value_internal_reg_64bit"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand 2 "" "i")))
+ (clobber (reg:SI 2))
+ (use (const_int 1))]
+ "TARGET_64BIT"
+ "*
+{
+ /* ??? Needs more work. Length computation, split into multiple insns,
+ do not use %r22 directly, expose delay slot. */
+ return \"ldd 16(%1),%%r2\;ldd 24(%1),%%r27\;bve,l (%%r2),%%r2\;nop\";
+}"
+ [(set_attr "type" "dyncall")
+ (set (attr "length") (const_int 16))])
+
(define_insn "call_value_internal_reg"
[(set (match_operand 0 "" "=rf")
(call (mem:SI (reg:SI 22))
@@ -5013,6 +6221,131 @@
DONE;
}")
+
+(define_expand "sibcall"
+ [(parallel [(call (match_operand:SI 0 "" "")
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 0))])]
+ "! TARGET_PORTABLE_RUNTIME"
+ "
+{
+ rtx op;
+ rtx call_insn;
+
+ op = XEXP (operands[0], 0);
+
+ /* We do not allow indirect sibling calls. */
+ call_insn = emit_call_insn (gen_sibcall_internal_symref (op, operands[1]));
+
+ if (flag_pic)
+ {
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), pic_offset_table_rtx);
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM_SAVED));
+
+ /* After each call we must restore the PIC register, even if it
+ doesn't appear to be used.
+
+ This will set regs_ever_live for the callee saved register we
+ stored the PIC register in. */
+ emit_move_insn (pic_offset_table_rtx,
+ gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM_SAVED));
+ }
+ DONE;
+}")
+
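For illustration, the kind of call the sibcall patterns target is a
direct call in tail position (our example, not from the patch):

    extern int bar (int);

    int
    foo (int x)
    {
      /* Direct and in tail position, so it can become a plain branch
         that reuses foo's frame; an indirect call here would not be
         accepted by the expander above.  */
      return bar (x + 1);
    }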
+(define_insn "sibcall_internal_symref"
+ [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ (match_operand 1 "" "i"))
+ (clobber (reg:SI 0))
+ (use (reg:SI 2))
+ (use (const_int 0))]
+ "! TARGET_PORTABLE_RUNTIME"
+ "*
+{
+ output_arg_descriptor (insn);
+ return output_call (insn, operands[0], 1);
+}"
+ [(set_attr "type" "call")
+ (set (attr "length")
+;; If we're sure that we can either reach the target or that the
+;; linker can use a long-branch stub, then the length is 4 bytes.
+;;
+;; Else we have to use a long call; the length will then be either
+;; 52 bytes (non-pic) or 68 bytes (pic).
+ (if_then_else (lt (plus (symbol_ref "total_code_bytes") (pc))
+ (const_int 240000))
+ (const_int 4)
+ (if_then_else (eq (symbol_ref "flag_pic")
+ (const_int 0))
+ (const_int 52)
+ (const_int 68))))])
+
+(define_expand "sibcall_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand:SI 1 "" "")
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 0))])]
+ "! TARGET_PORTABLE_RUNTIME"
+ "
+{
+ rtx op;
+ rtx call_insn;
+
+ op = XEXP (operands[1], 0);
+
+ /* We do not allow indirect sibling calls. */
+ call_insn = emit_call_insn (gen_sibcall_value_internal_symref (operands[0],
+ op,
+ operands[2]));
+ if (flag_pic)
+ {
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), pic_offset_table_rtx);
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM_SAVED));
+
+ /* After each call we must restore the PIC register, even if it
+ doesn't appear to be used.
+
+ This will set regs_ever_live for the callee saved register we
+ stored the PIC register in. */
+ emit_move_insn (pic_offset_table_rtx,
+ gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM_SAVED));
+ }
+ DONE;
+}")
+
+(define_insn "sibcall_value_internal_symref"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand 1 "call_operand_address" ""))
+ (match_operand 2 "" "i")))
+ (clobber (reg:SI 0))
+ (use (reg:SI 2))
+ (use (const_int 0))]
+ ;;- Don't use operand 1 for most machines.
+ "! TARGET_PORTABLE_RUNTIME"
+ "*
+{
+ output_arg_descriptor (insn);
+ return output_call (insn, operands[1], 1);
+}"
+ [(set_attr "type" "call")
+ (set (attr "length")
+;; If we're sure that we can either reach the target or that the
+;; linker can use a long-branch stub, then the length is 4 bytes.
+;;
+;; Else we have to use a long call; the length will then be either
+;; 52 bytes (non-pic) or 68 bytes (pic).
+ (if_then_else (lt (plus (symbol_ref "total_code_bytes") (pc))
+ (const_int 240000))
+ (const_int 4)
+ (if_then_else (eq (symbol_ref "flag_pic")
+ (const_int 0))
+ (const_int 52)
+ (const_int 68))))])
+
(define_insn "nop"
[(const_int 0)]
""
@@ -5050,22 +6383,32 @@
[(set_attr "type" "move")
(set_attr "length" "0")])
-;;; Hope this is only within a function...
-(define_insn "indirect_jump"
- [(set (pc) (match_operand 0 "register_operand" "r"))]
- "GET_MODE (operands[0]) == word_mode"
- "bv%* %%r0(%0)"
- [(set_attr "type" "branch")
- (set_attr "length" "4")])
-
;;; EH does longjmp's from and within the data section. Thus,
;;; an interspace branch is required for the longjmp implementation.
;;; Registers r1 and r2 are not saved in the jmpbuf environment.
;;; Thus, they can be used as scratch registers for the jump.
-(define_insn "interspace_jump"
- [(set (pc) (match_operand:SI 0 "register_operand" "a"))
- (clobber (reg:SI 2))]
+(define_expand "interspace_jump"
+ [(parallel
+ [(set (pc) (match_operand 0 "pmode_register_operand" "a"))
+ (clobber (match_dup 1))])]
""
+ "
+{
+ operands[1] = gen_rtx_REG (word_mode, 2);
+}")
+
+(define_insn ""
+ [(set (pc) (match_operand 0 "pmode_register_operand" "a"))
+ (clobber (reg:SI 2))]
+ "!TARGET_64BIT"
+ "ldsid (%%sr0,%0),%%r2\; mtsp %%r2,%%sr0\; be%* 0(%%sr0,%0)"
+ [(set_attr "type" "branch")
+ (set_attr "length" "12")])
+
+(define_insn ""
+ [(set (pc) (match_operand 0 "pmode_register_operand" "a"))
+ (clobber (reg:DI 2))]
+ "TARGET_64BIT"
"ldsid (%%sr0,%0),%%r2\; mtsp %%r2,%%sr0\; be%* 0(%%sr0,%0)"
[(set_attr "type" "branch")
(set_attr "length" "12")])
@@ -5095,21 +6438,32 @@
emit_barrier ();
DONE;
}")
+;;; Hope this is only within a function...
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand 0 "register_operand" "r"))]
+ "GET_MODE (operands[0]) == word_mode"
+ "bv%* %%r0(%0)"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
(define_expand "extzv"
- [(set (match_operand:SI 0 "register_operand" "")
- (zero_extract:SI (match_operand:SI 1 "register_operand" "")
- (match_operand:SI 2 "uint5_operand" "")
- (match_operand:SI 3 "uint5_operand" "")))]
+ [(set (match_operand 0 "register_operand" "")
+ (zero_extract (match_operand 1 "register_operand" "")
+ (match_operand 2 "uint32_operand" "")
+ (match_operand 3 "uint32_operand" "")))]
""
"
{
- if (! uint5_operand (operands[2], SImode)
- || ! uint5_operand (operands[3], SImode))
- FAIL;
+ if (TARGET_64BIT)
+ emit_insn (gen_extzv_64 (operands[0], operands[1],
+ operands[2], operands[3]));
+ else
+ emit_insn (gen_extzv_32 (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
}")
-(define_insn ""
+(define_insn "extzv_32"
[(set (match_operand:SI 0 "register_operand" "=r")
(zero_extract:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "uint5_operand" "")
@@ -5129,20 +6483,44 @@
[(set_attr "type" "shift")
(set_attr "length" "4")])
+(define_insn "extzv_64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extract:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "uint32_operand" "")
+ (match_operand:DI 3 "uint32_operand" "")))]
+ "TARGET_64BIT"
+ "extrd,u %1,%3+%2-1,%2,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extract:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 1)
+ (match_operand:DI 3 "register_operand" "q")))]
+ "TARGET_64BIT"
+ "extrd,u %1,%%sar,1,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
(define_expand "extv"
- [(set (match_operand:SI 0 "register_operand" "")
- (sign_extract:SI (match_operand:SI 1 "register_operand" "")
- (match_operand:SI 2 "uint5_operand" "")
- (match_operand:SI 3 "uint5_operand" "")))]
+ [(set (match_operand 0 "register_operand" "")
+ (sign_extract (match_operand 1 "register_operand" "")
+ (match_operand 2 "uint32_operand" "")
+ (match_operand 3 "uint32_operand" "")))]
""
"
{
- if (! uint5_operand (operands[2], SImode)
- || ! uint5_operand (operands[3], SImode))
- FAIL;
+ if (TARGET_64BIT)
+ emit_insn (gen_extv_64 (operands[0], operands[1],
+ operands[2], operands[3]));
+ else
+ emit_insn (gen_extv_32 (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
}")
-(define_insn ""
+(define_insn "extv_32"
[(set (match_operand:SI 0 "register_operand" "=r")
(sign_extract:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "uint5_operand" "")
@@ -5157,25 +6535,50 @@
(sign_extract:SI (match_operand:SI 1 "register_operand" "r")
(const_int 1)
(match_operand:SI 3 "register_operand" "q")))]
- ""
+ "!TARGET_64BIT"
"{vextrs %1,1,%0|extrw,s %1,%%sar,1,%0}"
[(set_attr "type" "shift")
(set_attr "length" "4")])
+(define_insn "extv_64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extract:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "uint32_operand" "")
+ (match_operand:DI 3 "uint32_operand" "")))]
+ "TARGET_64BIT"
+ "extrd,s %1,%3+%2-1,%2,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extract:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 1)
+ (match_operand:DI 3 "register_operand" "q")))]
+ "TARGET_64BIT"
+ "extrd,s %1,%%sar,1,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
+;; Only specify the mode of operand 0; the rest are assumed to be word_mode.
(define_expand "insv"
- [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "")
- (match_operand:SI 1 "uint5_operand" "")
- (match_operand:SI 2 "uint5_operand" ""))
- (match_operand:SI 3 "arith5_operand" "r,L"))]
+ [(set (zero_extract (match_operand 0 "register_operand" "")
+ (match_operand 1 "uint32_operand" "")
+ (match_operand 2 "uint32_operand" ""))
+ (match_operand 3 "arith5_operand" ""))]
""
"
{
- if (! uint5_operand (operands[1], SImode)
- || ! uint5_operand (operands[2], SImode))
- FAIL;
+ if (TARGET_64BIT)
+ emit_insn (gen_insv_64 (operands[0], operands[1],
+ operands[2], operands[3]));
+ else
+ emit_insn (gen_insv_32 (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
}")
-(define_insn ""
+(define_insn "insv_32"
[(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r,r")
(match_operand:SI 1 "uint5_operand" "")
(match_operand:SI 2 "uint5_operand" ""))
@@ -5203,6 +6606,44 @@
[(set_attr "type" "shift")
(set_attr "length" "4")])
+(define_insn "insv_64"
+ [(set (zero_extract:DI (match_operand:DI 0 "register_operand" "+r,r")
+ (match_operand:DI 1 "uint32_operand" "")
+ (match_operand:DI 2 "uint32_operand" ""))
+ (match_operand:DI 3 "arith32_operand" "r,L"))]
+ "TARGET_64BIT"
+ "@
+ depd %3,%2+%1-1,%1,%0
+ depdi %3,%2+%1-1,%1,%0"
+ [(set_attr "type" "shift,shift")
+ (set_attr "length" "4,4")])
+
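insv is the store-side counterpart: it replaces a bit-field of operand 0
with operand 3, which insv_64 does with a single depd/depdi. A C model
(again LSB-relative, our sketch):

    /* insv: replace the LEN-bit field of X at POS with the low bits
       of Y.  */
    unsigned long long
    insv_model (unsigned long long x, unsigned int len, unsigned int pos,
                unsigned long long y)
    {
      unsigned long long mask = len >= 64 ? ~0ULL : (1ULL << len) - 1;
      return (x & ~(mask << pos)) | ((y & mask) << pos);
    }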
+;; Optimize insertion of const_int values of the form 1...1xxxx.
+(define_insn ""
+ [(set (zero_extract:DI (match_operand:DI 0 "register_operand" "+r")
+ (match_operand:DI 1 "uint32_operand" "")
+ (match_operand:DI 2 "uint32_operand" ""))
+ (match_operand:DI 3 "const_int_operand" ""))]
+ "(INTVAL (operands[3]) & 0x10) != 0
+ && TARGET_64BIT
+ && (~INTVAL (operands[3]) & ((1L << INTVAL (operands[1])) - 1) & ~0xf) == 0"
+ "*
+{
+ operands[3] = GEN_INT ((INTVAL (operands[3]) & 0xf) - 0x10);
+ return \"depdi %3,%2+%1-1,%1,%0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (const_int 32)))]
+ "TARGET_64BIT"
+ "depd,z %1,31,32,%0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "4")])
+
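The last pattern packs a word into the high half of a doubleword with a
single deposit; in C:

    /* "depd,z %1,31,32,%0": place the 32-bit source in bits 63..32.  */
    unsigned long long
    high_half_model (unsigned int x)
    {
      return (unsigned long long) x << 32;
    }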
;; This insn is used for some loop tests, typically loops reversed when
;; strength reduction is used. It is actually created when the instruction
;; combination phase combines the special loop test. Since this insn
@@ -5751,7 +7192,7 @@
(clobber (reg:SI 31))])
(set (match_operand:SI 0 "register_operand" "")
(reg:SI 29))]
- "! TARGET_PORTABLE_RUNTIME"
+ "! TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
"
{
operands[2] = gen_reg_rtx (SImode);
@@ -5769,7 +7210,7 @@
(clobber (reg:SI 26))
(clobber (reg:SI 22))
(clobber (reg:SI 31))]
- ""
+ "!TARGET_64BIT"
"*
{
/* Must import the magic millicode routine. */
diff --git a/gcc/config/pa/pa64-regs.h b/gcc/config/pa/pa64-regs.h
index e9affa0..5e99ad3 100644
--- a/gcc/config/pa/pa64-regs.h
+++ b/gcc/config/pa/pa64-regs.h
@@ -56,7 +56,7 @@ Boston, MA 02111-1307, USA. */
Reg 20-22 = Temporary Registers
Reg 23-26 = Temporary/Parameter Registers
Reg 27 = Global Data Pointer (hp)
- Reg 28 = Temporary/???/Return Value register
+ Reg 28 = Temporary/Return Value register
Reg 29 = Temporary/Static Chain/Return Value register #2
Reg 30 = stack pointer
Reg 31 = Temporary/Millicode Return Pointer (hp)
diff --git a/gcc/config/pa/pa64-start.h b/gcc/config/pa/pa64-start.h
index 6201b5b..e307382 100644
--- a/gcc/config/pa/pa64-start.h
+++ b/gcc/config/pa/pa64-start.h
@@ -3,4 +3,4 @@
checking for TARGET_64BIT. */
#define TARGET_64BIT 1
#define TARGET_PA_11 1
-#defien TARGET_PA_20 1
+#define TARGET_PA_20 1