author     Richard Henderson <rth@redhat.com>     2009-07-08 09:41:23 -0700
committer  Richard Henderson <rth@gcc.gnu.org>    2009-07-08 09:41:23 -0700
commit     ac2e563fcfce40295dc962d8e98796b794726b0f
tree       f71db91f4b324227a6722f9b26010f65116c833b
parent     acb78844c730bdb9b411442caa4df7ee78309596
re PR target/38900 (ICE: unable to find a register to spill)
	PR target/38900
	* config/i386/i386.h (CONDITIONAL_REGISTER_USAGE): Move to i386.c.
	(enum reg_class): Add CLOBBERED_REGS.
	(REG_CLASS_NAMES, REG_CLASS_CONTENTS): Likewise.
	* config/i386/i386.c (ix86_conditional_register_usage): Moved
	from CONDITIONAL_REGISTER_USAGE; build CLOBBERED_REGS for 64-bit.
	(ix86_function_ok_for_sibcall): Tidy.  Disallow MS->SYSV sibcalls.
	(ix86_expand_call): Use sibcall_insn_operand when needed.  Don't
	force 64-bit sibcalls into R11.
	* config/i386/constraints.md (U): New constraint.
	* config/i386/i386.md (sibcall_1, sibcall_value_1): Use it.
	(sibcall_1_rex64, sibcall_value_1_rex64): Likewise.
	(sibcall_1_rex64_v, sibcall_value_1_rex64_v): Remove.

From-SVN: r149373
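
The spill ICE in PR 38900 stems from the old sibcall patterns pinning the target of an indirect tail call to a fixed set of registers: the 32-bit alternatives were hard-coded to "c,d,a" (%ecx/%edx/%eax) and the 64-bit expander copied the address into %r11.  A minimal sketch of the affected shape, with made-up names rather than the PR's own test case:

    /* An indirect tail call: the target address has to end up in a register
       that is dead across the jump.  With the new CLOBBERED_REGS class and
       the "U" constraint, the register allocator may use any call-clobbered
       integer register instead of a fixed one.  */
    extern void (*handler) (int, int, int);

    void
    dispatch (int a, int b, int c)
    {
      handler (a, b, c);   /* at -O2, emitted as a sibcall: jmp *<reg> */
    }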
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog                    16
-rw-r--r--  gcc/config/i386/constraints.md    5
-rw-r--r--  gcc/config/i386/i386-protos.h     1
-rw-r--r--  gcc/config/i386/i386.c          165
-rw-r--r--  gcc/config/i386/i386.h           58
-rw-r--r--  gcc/config/i386/i386.md          49
6 files changed, 165 insertions, 129 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 456879c..47f8920 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,19 @@
+2009-07-08 Richard Henderson <rth@redhat.com>
+
+ PR target/38900
+ * config/i386/i386.h (CONDITIONAL_REGISTER_USAGE): Move to i386.c.
+ (enum reg_class): Add CLOBBERED_REGS.
+ (REG_CLASS_NAMES, REG_CLASS_CONTENTS): Likewise.
+ * config/i386/i386.c (ix86_conditional_register_usage): Moved
+ from CONDITIONAL_REGISTER_USAGE; build CLOBBERED_REGS for 64-bit.
+ (ix86_function_ok_for_sibcall): Tidy. Disallow MS->SYSV sibcalls.
+ (ix86_expand_call): Use sibcall_insn_operand when needed. Don't
+ force 64-bit sibcalls into R11.
+ * config/i386/constraints.md (U): New constraint.
+ * config/i386/i386.md (sibcall_1, sibcall_value_1): Use it.
+ (sibcall_1_rex64, sibcall_value_1_rex64): Likewise.
+ (sibcall_1_rex64_v, sibcall_value_1_rex64_v): Remove.
+
2009-07-08 Shujing Zhao <pearly.zhao@oracle.com>
* basic-block.h (dump_regset, debug_regset): Remove duplicate
diff --git a/gcc/config/i386/constraints.md b/gcc/config/i386/constraints.md
index 134ef61..89722bb 100644
--- a/gcc/config/i386/constraints.md
+++ b/gcc/config/i386/constraints.md
@@ -18,7 +18,7 @@
;; <http://www.gnu.org/licenses/>.
;;; Unused letters:
-;;; B H TU W
+;;; B H T W
;;; h jk vw z
;; Integer register constraints.
@@ -62,6 +62,9 @@
"The @code{a} and @code{d} registers, as a pair (for instructions
that return half the result in one and half in the other).")
+(define_register_constraint "U" "CLOBBERED_REGS"
+ "The call-clobbered integer registers.")
+
;; Floating-point register constraints.
(define_register_constraint "f"
"TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387 ? FLOAT_REGS : NO_REGS"
diff --git a/gcc/config/i386/i386-protos.h b/gcc/config/i386/i386-protos.h
index 54d30b7..d1d601a 100644
--- a/gcc/config/i386/i386-protos.h
+++ b/gcc/config/i386/i386-protos.h
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
/* Functions in i386.c */
extern void override_options (bool);
extern void optimization_options (int, int);
+extern void ix86_conditional_register_usage (void);
extern int ix86_can_use_return_insn_p (void);
extern void ix86_setup_frame_addresses (void);
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 1da1896..15a73d8 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -3418,6 +3418,79 @@ override_options (bool main_args_p)
target_option_default_node = target_option_current_node
= build_target_option_node ();
}
+
+/* Update register usage after having seen the compiler flags. */
+
+void
+ix86_conditional_register_usage (void)
+{
+ int i;
+ unsigned int j;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (fixed_regs[i] > 1)
+ fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
+ if (call_used_regs[i] > 1)
+ call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
+ }
+
+ /* The PIC register, if it exists, is fixed. */
+ j = PIC_OFFSET_TABLE_REGNUM;
+ if (j != INVALID_REGNUM)
+ fixed_regs[j] = call_used_regs[j] = 1;
+
+ /* The MS_ABI changes the set of call-used registers. */
+ if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
+ {
+ call_used_regs[SI_REG] = 0;
+ call_used_regs[DI_REG] = 0;
+ call_used_regs[XMM6_REG] = 0;
+ call_used_regs[XMM7_REG] = 0;
+ for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
+ call_used_regs[i] = 0;
+ }
+
+ /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
+ other call-clobbered regs for 64-bit. */
+ if (TARGET_64BIT)
+ {
+ CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
+ && call_used_regs[i])
+ SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
+ }
+
+ /* If MMX is disabled, squash the registers. */
+ if (! TARGET_MMX)
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
+ fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
+
+ /* If SSE is disabled, squash the registers. */
+ if (! TARGET_SSE)
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
+ fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
+
+ /* If the FPU is disabled, squash the registers. */
+ if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
+ fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
+
+ /* If 32-bit, squash the 64-bit registers. */
+ if (! TARGET_64BIT)
+ {
+ for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
+ reg_names[i] = "";
+ for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
+ reg_names[i] = "";
+ }
+}
+
/* Save the current options */
@@ -4193,7 +4266,7 @@ optimization_options (int level, int size ATTRIBUTE_UNUSED)
static bool
ix86_function_ok_for_sibcall (tree decl, tree exp)
{
- tree func;
+ tree type, decl_or_type;
rtx a, b;
/* If we are generating position-independent code, we cannot sibcall
@@ -4202,13 +4275,23 @@ ix86_function_ok_for_sibcall (tree decl, tree exp)
if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
return false;
+ /* If we need to align the outgoing stack, then sibcalling would
+ unalign the stack, which may break the called function. */
+ if (ix86_incoming_stack_boundary < PREFERRED_STACK_BOUNDARY)
+ return false;
+
if (decl)
- func = decl;
+ {
+ decl_or_type = decl;
+ type = TREE_TYPE (decl);
+ }
else
{
- func = TREE_TYPE (CALL_EXPR_FN (exp));
- if (POINTER_TYPE_P (func))
- func = TREE_TYPE (func);
+ /* We're looking at the CALL_EXPR, we need the type of the function. */
+ type = CALL_EXPR_FN (exp); /* pointer expression */
+ type = TREE_TYPE (type); /* pointer type */
+ type = TREE_TYPE (type); /* function type */
+ decl_or_type = type;
}
/* Check that the return value locations are the same. Like
@@ -4220,7 +4303,7 @@ ix86_function_ok_for_sibcall (tree decl, tree exp)
differences in the return value ABI. Note that it is ok for one
of the functions to have void return type as long as the return
value of the other is passed in a register. */
- a = ix86_function_value (TREE_TYPE (exp), func, false);
+ a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
cfun->decl, false);
if (STACK_REG_P (a) || STACK_REG_P (b))
@@ -4233,38 +4316,32 @@ ix86_function_ok_for_sibcall (tree decl, tree exp)
else if (!rtx_equal_p (a, b))
return false;
- /* If this call is indirect, we'll need to be able to use a call-clobbered
- register for the address of the target function. Make sure that all
- such registers are not used for passing parameters. */
- if (!decl && !TARGET_64BIT)
+ if (TARGET_64BIT)
{
- tree type;
-
- /* We're looking at the CALL_EXPR, we need the type of the function. */
- type = CALL_EXPR_FN (exp); /* pointer expression */
- type = TREE_TYPE (type); /* pointer type */
- type = TREE_TYPE (type); /* function type */
-
- if (ix86_function_regparm (type, NULL) >= 3)
+ /* The SYSV ABI has more call-clobbered registers;
+ disallow sibcalls from MS to SYSV. */
+ if (cfun->machine->call_abi == MS_ABI
+ && ix86_function_type_abi (type) == SYSV_ABI)
+ return false;
+ }
+ else
+ {
+ /* If this call is indirect, we'll need to be able to use a
+ call-clobbered register for the address of the target function.
+ Make sure that all such registers are not used for passing
+ parameters. Note that DLLIMPORT functions are indirect. */
+ if (!decl
+ || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
{
- /* ??? Need to count the actual number of registers to be used,
- not the possible number of registers. Fix later. */
- return false;
+ if (ix86_function_regparm (type, NULL) >= 3)
+ {
+ /* ??? Need to count the actual number of registers to be used,
+ not the possible number of registers. Fix later. */
+ return false;
+ }
}
}
- /* Dllimport'd functions are also called indirectly. */
- if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
- && !TARGET_64BIT
- && decl && DECL_DLLIMPORT_P (decl)
- && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
- return false;
-
- /* If we need to align the outgoing stack, then sibcalling would
- unalign the stack, which may break the called function. */
- if (ix86_incoming_stack_boundary < PREFERRED_STACK_BOUNDARY)
- return false;
-
/* Otherwise okay. That also includes certain types of indirect calls. */
return true;
}
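
The new MS->SYSV restriction covers cases of the following shape (hypothetical example; the ms_abi attribute is the back end's own, the function names are invented):

    /* A SYSV callee may clobber registers (%rsi, %rdi, %xmm6-%xmm15) that
       are call-saved under the MS ABI.  If this were a sibcall, control
       would return from sysv_target straight to ms_caller's own caller
       with those registers clobbered, so it must stay a normal call.  */
    extern void sysv_target (void);

    __attribute__((ms_abi)) void
    ms_caller (void)
    {
      sysv_target ();   /* tail position, but not turned into a jmp */
    }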
@@ -4321,7 +4398,8 @@ ix86_handle_cconv_attribute (tree *node, tree name,
if (TARGET_64BIT)
{
/* Do not warn when emulating the MS ABI. */
- if (TREE_CODE (*node) != FUNCTION_TYPE || ix86_function_type_abi (*node)!=MS_ABI)
+ if (TREE_CODE (*node) != FUNCTION_TYPE
+ || ix86_function_type_abi (*node) != MS_ABI)
warning (OPT_Wattributes, "%qE attribute ignored",
name);
*no_add_attrs = true;
@@ -4790,7 +4868,8 @@ init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
/* Set up the number of registers to use for passing arguments. */
if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
- sorry ("ms_abi attribute require -maccumulate-outgoing-args or subtarget optimization implying it");
+ sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
+ "or subtarget optimization implying it");
cum->nregs = ix86_regparm;
if (TARGET_64BIT)
{
@@ -7933,7 +8012,8 @@ ix86_compute_frame_layout (struct ix86_frame *frame)
|| (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
frame->save_regs_using_mov = false;
- if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && current_function_sp_is_unchanging
+ if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
+ && current_function_sp_is_unchanging
&& current_function_is_leaf
&& !ix86_current_function_calls_tls_descriptor)
{
@@ -19151,20 +19231,13 @@ ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
&& GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
&& !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
- else if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
+ else if (sibcall
+ ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
+ : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
{
fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
fnaddr = gen_rtx_MEM (QImode, fnaddr);
}
- if (sibcall && TARGET_64BIT
- && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
- {
- rtx addr;
- addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
- fnaddr = gen_rtx_REG (Pmode, R11_REG);
- emit_move_insn (fnaddr, addr);
- fnaddr = gen_rtx_MEM (QImode, fnaddr);
- }
call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
if (retval)
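
A sketch of what the expander change means for a 64-bit indirect tail call (hypothetical source; the %r11 routing on the left is what the removed code produced, while the register on the right is simply whichever call-clobbered GPR the allocator happens to pick):

    extern void (*fp) (void);
    void tail_call (void) { fp (); }

    /*   before:  movq  fp(%rip), %r11        after:  movq  fp(%rip), %rax
                  jmp   *%r11                         jmp   *%rax        */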
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index 2d49927..97483b7 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -960,52 +960,7 @@ enum target_cpu_default
#define OVERRIDE_ABI_FORMAT(FNDECL) ix86_call_abi_override (FNDECL)
/* Macro to conditionally modify fixed_regs/call_used_regs. */
-#define CONDITIONAL_REGISTER_USAGE \
-do { \
- int i; \
- unsigned int j; \
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
- { \
- if (fixed_regs[i] > 1) \
- fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2)); \
- if (call_used_regs[i] > 1) \
- call_used_regs[i] = (call_used_regs[i] \
- == (TARGET_64BIT ? 3 : 2)); \
- } \
- j = PIC_OFFSET_TABLE_REGNUM; \
- if (j != INVALID_REGNUM) \
- fixed_regs[j] = call_used_regs[j] = 1; \
- if (TARGET_64BIT \
- && ((cfun && cfun->machine->call_abi == MS_ABI) \
- || (!cfun && ix86_abi == MS_ABI))) \
- { \
- call_used_regs[SI_REG] = 0; \
- call_used_regs[DI_REG] = 0; \
- call_used_regs[XMM6_REG] = 0; \
- call_used_regs[XMM7_REG] = 0; \
- for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++) \
- call_used_regs[i] = 0; \
- } \
- if (! TARGET_MMX) \
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
- if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i)) \
- fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = ""; \
- if (! TARGET_SSE) \
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
- if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i)) \
- fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = ""; \
- if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387)) \
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
- if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i)) \
- fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = ""; \
- if (! TARGET_64BIT) \
- { \
- for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++) \
- reg_names[i] = ""; \
- for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++) \
- reg_names[i] = ""; \
- } \
- } while (0)
+#define CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage ()
/* Return number of consecutive hard regs needed starting at reg REGNO
to hold something of mode MODE.
@@ -1225,6 +1180,7 @@ enum reg_class
NO_REGS,
AREG, DREG, CREG, BREG, SIREG, DIREG,
AD_REGS, /* %eax/%edx for DImode */
+ CLOBBERED_REGS, /* call-clobbered integers */
Q_REGS, /* %eax %ebx %ecx %edx */
NON_Q_REGS, /* %esi %edi %ebp %esp */
INDEX_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp */
@@ -1273,6 +1229,7 @@ enum reg_class
"AREG", "DREG", "CREG", "BREG", \
"SIREG", "DIREG", \
"AD_REGS", \
+ "CLOBBERED_REGS", \
"Q_REGS", "NON_Q_REGS", \
"INDEX_REGS", \
"LEGACY_REGS", \
@@ -1290,9 +1247,11 @@ enum reg_class
"FLOAT_INT_SSE_REGS", \
"ALL_REGS" }
-/* Define which registers fit in which classes.
- This is an initializer for a vector of HARD_REG_SET
- of length N_REG_CLASSES. */
+/* Define which registers fit in which classes. This is an initializer
+ for a vector of HARD_REG_SET of length N_REG_CLASSES.
+
+ Note that the default setting of CLOBBERED_REGS is for 32-bit; this
+ is adjusted by CONDITIONAL_REGISTER_USAGE for the 64-bit ABI in effect. */
#define REG_CLASS_CONTENTS \
{ { 0x00, 0x0 }, \
@@ -1300,6 +1259,7 @@ enum reg_class
{ 0x04, 0x0 }, { 0x08, 0x0 }, /* CREG, BREG */ \
{ 0x10, 0x0 }, { 0x20, 0x0 }, /* SIREG, DIREG */ \
{ 0x03, 0x0 }, /* AD_REGS */ \
+ { 0x07, 0x0 }, /* CLOBBERED_REGS */ \
{ 0x0f, 0x0 }, /* Q_REGS */ \
{ 0x1100f0, 0x1fe0 }, /* NON_Q_REGS */ \
{ 0x7f, 0x1fe0 }, /* INDEX_REGS */ \
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 353a70b..47f2ce6 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -15536,14 +15536,12 @@
[(set_attr "type" "call")])
(define_insn "*sibcall_1"
- [(call (mem:QI (match_operand:SI 0 "sibcall_insn_operand" "s,c,d,a"))
+ [(call (mem:QI (match_operand:SI 0 "sibcall_insn_operand" "s,U"))
(match_operand 1 "" ""))]
"SIBLING_CALL_P (insn) && !TARGET_64BIT"
-{
- if (constant_call_address_operand (operands[0], Pmode))
- return "jmp\t%P0";
- return "jmp\t%A0";
-}
+ "@
+ jmp\t%P0
+ jmp\t%A0"
[(set_attr "type" "call")])
(define_insn "*call_1_rex64"
@@ -15590,22 +15588,15 @@
[(set_attr "type" "call")])
(define_insn "*sibcall_1_rex64"
- [(call (mem:QI (match_operand:DI 0 "constant_call_address_operand" ""))
+ [(call (mem:QI (match_operand:DI 0 "sibcall_insn_operand" "s,U"))
(match_operand 1 "" ""))]
"SIBLING_CALL_P (insn) && TARGET_64BIT"
- "jmp\t%P0"
- [(set_attr "type" "call")])
-
-(define_insn "*sibcall_1_rex64_v"
- [(call (mem:QI (reg:DI R11_REG))
- (match_operand 0 "" ""))]
- "SIBLING_CALL_P (insn) && TARGET_64BIT"
- "jmp\t{*%%}r11"
+ "@
+ jmp\t%P0
+ jmp\t%A0"
[(set_attr "type" "call")])
-
;; Call subroutine, returning value in operand 0
-
(define_expand "call_value_pop"
[(parallel [(set (match_operand 0 "" "")
(call (match_operand:QI 1 "" "")
@@ -22183,14 +22174,12 @@
(define_insn "*sibcall_value_1"
[(set (match_operand 0 "" "")
- (call (mem:QI (match_operand:SI 1 "sibcall_insn_operand" "s,c,d,a"))
+ (call (mem:QI (match_operand:SI 1 "sibcall_insn_operand" "s,U"))
(match_operand:SI 2 "" "")))]
"SIBLING_CALL_P (insn) && !TARGET_64BIT"
-{
- if (constant_call_address_operand (operands[1], Pmode))
- return "jmp\t%P1";
- return "jmp\t%A1";
-}
+ "@
+ jmp\t%P1
+ jmp\t%A1"
[(set_attr "type" "callv")])
(define_insn "*call_value_1_rex64"
@@ -22241,18 +22230,12 @@
(define_insn "*sibcall_value_1_rex64"
[(set (match_operand 0 "" "")
- (call (mem:QI (match_operand:DI 1 "constant_call_address_operand" ""))
+ (call (mem:QI (match_operand:DI 1 "sibcall_insn_operand" "s,U"))
(match_operand:DI 2 "" "")))]
"SIBLING_CALL_P (insn) && TARGET_64BIT"
- "jmp\t%P1"
- [(set_attr "type" "callv")])
-
-(define_insn "*sibcall_value_1_rex64_v"
- [(set (match_operand 0 "" "")
- (call (mem:QI (reg:DI R11_REG))
- (match_operand:DI 1 "" "")))]
- "SIBLING_CALL_P (insn) && TARGET_64BIT"
- "jmp\t{*%%}r11"
+ "@
+ jmp\t%P1
+ jmp\t%A1"
[(set_attr "type" "callv")])
;; We used to use "int $5", in honor of #BR which maps to interrupt vector 5.