aboutsummaryrefslogtreecommitdiff
path: root/gcc/config/avr/avr.c
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/config/avr/avr.c')
-rw-r--r--gcc/config/avr/avr.c592
1 file changed, 568 insertions, 24 deletions
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
index e3b85d6..c1753300 100644
--- a/gcc/config/avr/avr.c
+++ b/gcc/config/avr/avr.c
@@ -49,6 +49,10 @@
#include "params.h"
#include "df.h"
+#ifndef CONST_FIXED_P
+#define CONST_FIXED_P(X) (CONST_FIXED == GET_CODE (X))
+#endif
+
/* Maximal allowed offset for an address in the LD command */
#define MAX_LD_OFFSET(MODE) (64 - (signed)GET_MODE_SIZE (MODE))
@@ -264,6 +268,23 @@ avr_popcount_each_byte (rtx xval, int n_bytes, int pop_mask)
return true;
}
+
+/* Access some RTX as INT_MODE. If X is a CONST_FIXED we can get
+ the bit representation of X by "casting" it to CONST_INT. */
+
+rtx
+avr_to_int_mode (rtx x)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ return VOIDmode == mode
+ ? x
+ : simplify_gen_subreg (int_mode_for_mode (mode), x, mode, 0);
+}
+
+
+/* Implement `TARGET_OPTION_OVERRIDE'. */
+
static void
avr_option_override (void)
{
@@ -389,9 +410,14 @@ avr_regno_reg_class (int r)
}
+/* Implement `TARGET_SCALAR_MODE_SUPPORTED_P'. */
+
static bool
avr_scalar_mode_supported_p (enum machine_mode mode)
{
+ if (ALL_FIXED_POINT_MODE_P (mode))
+ return true;
+
if (PSImode == mode)
return true;
@@ -715,6 +741,58 @@ avr_initial_elimination_offset (int from, int to)
}
}
+
+/* Helper for the function below. */
+
+static void
+avr_adjust_type_node (tree *node, enum machine_mode mode, int sat_p)
+{
+ *node = make_node (FIXED_POINT_TYPE);
+ TYPE_SATURATING (*node) = sat_p;
+ TYPE_UNSIGNED (*node) = UNSIGNED_FIXED_POINT_MODE_P (mode);
+ TYPE_IBIT (*node) = GET_MODE_IBIT (mode);
+ TYPE_FBIT (*node) = GET_MODE_FBIT (mode);
+ TYPE_PRECISION (*node) = GET_MODE_BITSIZE (mode);
+ TYPE_ALIGN (*node) = 8;
+ SET_TYPE_MODE (*node, mode);
+
+ layout_type (*node);
+}
+
+
+/* Implement `TARGET_BUILD_BUILTIN_VA_LIST'. */
+
+static tree
+avr_build_builtin_va_list (void)
+{
+ /* avr-modes.def adjusts [U]TA to be 64-bit modes with 48 fractional bits.
+ This is more appropriate for the 8-bit machine AVR than 128-bit modes.
+ The ADJUST_IBIT/FBIT are handled in toplev:init_adjust_machine_modes()
+ which is auto-generated by genmodes, but the compiler assigns [U]DAmode
+ to the long long accum modes instead of the desired [U]TAmode.
+
+ Fix this now, right after node setup in tree.c:build_common_tree_nodes().
+ This must run before c-cppbuiltin.c:builtin_define_fixed_point_constants()
+ which built-in defines macros like __ULLACCUM_FBIT__ that are used by
+ libgcc to detect IBIT and FBIT. */
+
+ avr_adjust_type_node (&ta_type_node, TAmode, 0);
+ avr_adjust_type_node (&uta_type_node, UTAmode, 0);
+ avr_adjust_type_node (&sat_ta_type_node, TAmode, 1);
+ avr_adjust_type_node (&sat_uta_type_node, UTAmode, 1);
+
+ unsigned_long_long_accum_type_node = uta_type_node;
+ long_long_accum_type_node = ta_type_node;
+ sat_unsigned_long_long_accum_type_node = sat_uta_type_node;
+ sat_long_long_accum_type_node = sat_ta_type_node;
+
+ /* Dispatch to the default handler. */
+
+ return std_build_builtin_va_list ();
+}
+
+
+/* Implement `TARGET_BUILTIN_SETJMP_FRAME_VALUE'. */
/* Actual start of frame is virtual_stack_vars_rtx this is offset from
frame pointer by +STARTING_FRAME_OFFSET.
Using saved frame = virtual_stack_vars_rtx - STARTING_FRAME_OFFSET
@@ -723,10 +801,13 @@ avr_initial_elimination_offset (int from, int to)
static rtx
avr_builtin_setjmp_frame_value (void)
{
- return gen_rtx_MINUS (Pmode, virtual_stack_vars_rtx,
- gen_int_mode (STARTING_FRAME_OFFSET, Pmode));
+ rtx xval = gen_reg_rtx (Pmode);
+ emit_insn (gen_subhi3 (xval, virtual_stack_vars_rtx,
+ gen_int_mode (STARTING_FRAME_OFFSET, Pmode)));
+ return xval;
}
+
/* Return contents of MEM at frame pointer + stack size + 1 (+2 if 3 byte PC).
This is return address of function. */
rtx
@@ -1580,7 +1661,7 @@ avr_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
MEM, strict);
if (strict
- && DImode == mode
+ && GET_MODE_SIZE (mode) > 4
&& REG_X == REGNO (x))
{
ok = false;
@@ -2081,6 +2162,14 @@ avr_print_operand (FILE *file, rtx x, int code)
/* Use normal symbol for direct address no linker trampoline needed */
output_addr_const (file, x);
}
+ else if (GET_CODE (x) == CONST_FIXED)
+ {
+ HOST_WIDE_INT ival = INTVAL (avr_to_int_mode (x));
+ if (code != 0)
+ output_operand_lossage ("Unsupported code '%c'for fixed-point:",
+ code);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
+ }
else if (GET_CODE (x) == CONST_DOUBLE)
{
long val;
@@ -2116,6 +2205,7 @@ notice_update_cc (rtx body ATTRIBUTE_UNUSED, rtx insn)
case CC_OUT_PLUS:
case CC_OUT_PLUS_NOCLOBBER:
+ case CC_MINUS:
case CC_LDI:
{
rtx *op = recog_data.operand;
@@ -2139,6 +2229,11 @@ notice_update_cc (rtx body ATTRIBUTE_UNUSED, rtx insn)
cc = (enum attr_cc) icc;
break;
+ case CC_MINUS:
+ avr_out_minus (op, &len_dummy, &icc);
+ cc = (enum attr_cc) icc;
+ break;
+
case CC_LDI:
cc = (op[1] == CONST0_RTX (GET_MODE (op[0]))
@@ -2779,9 +2874,11 @@ output_movqi (rtx insn, rtx operands[], int *real_l)
if (real_l)
*real_l = 1;
- if (register_operand (dest, QImode))
+ gcc_assert (1 == GET_MODE_SIZE (GET_MODE (dest)));
+
+ if (REG_P (dest))
{
- if (register_operand (src, QImode)) /* mov r,r */
+ if (REG_P (src)) /* mov r,r */
{
if (test_hard_reg_class (STACK_REG, dest))
return "out %0,%1";
@@ -2803,7 +2900,7 @@ output_movqi (rtx insn, rtx operands[], int *real_l)
rtx xop[2];
xop[0] = dest;
- xop[1] = src == const0_rtx ? zero_reg_rtx : src;
+ xop[1] = src == CONST0_RTX (GET_MODE (dest)) ? zero_reg_rtx : src;
return out_movqi_mr_r (insn, xop, real_l);
}
@@ -2825,6 +2922,8 @@ output_movhi (rtx insn, rtx xop[], int *plen)
return avr_out_lpm (insn, xop, plen);
}
+ gcc_assert (2 == GET_MODE_SIZE (GET_MODE (dest)));
+
if (REG_P (dest))
{
if (REG_P (src)) /* mov r,r */
@@ -2843,7 +2942,6 @@ output_movhi (rtx insn, rtx xop[], int *plen)
return TARGET_NO_INTERRUPTS
? avr_asm_len ("out __SP_H__,%B1" CR_TAB
"out __SP_L__,%A1", xop, plen, -2)
-
: avr_asm_len ("in __tmp_reg__,__SREG__" CR_TAB
"cli" CR_TAB
"out __SP_H__,%B1" CR_TAB
@@ -2880,7 +2978,7 @@ output_movhi (rtx insn, rtx xop[], int *plen)
rtx xop[2];
xop[0] = dest;
- xop[1] = src == const0_rtx ? zero_reg_rtx : src;
+ xop[1] = src == CONST0_RTX (GET_MODE (dest)) ? zero_reg_rtx : src;
return out_movhi_mr_r (insn, xop, plen);
}
@@ -3403,9 +3501,10 @@ output_movsisf (rtx insn, rtx operands[], int *l)
if (!l)
l = &dummy;
- if (register_operand (dest, VOIDmode))
+ gcc_assert (4 == GET_MODE_SIZE (GET_MODE (dest)));
+ if (REG_P (dest))
{
- if (register_operand (src, VOIDmode)) /* mov r,r */
+ if (REG_P (src)) /* mov r,r */
{
if (true_regnum (dest) > true_regnum (src))
{
@@ -3440,10 +3539,10 @@ output_movsisf (rtx insn, rtx operands[], int *l)
{
return output_reload_insisf (operands, NULL_RTX, real_l);
}
- else if (GET_CODE (src) == MEM)
+ else if (MEM_P (src))
return out_movsi_r_mr (insn, operands, real_l); /* mov r,m */
}
- else if (GET_CODE (dest) == MEM)
+ else if (MEM_P (dest))
{
const char *templ;
@@ -4126,14 +4225,25 @@ avr_out_compare (rtx insn, rtx *xop, int *plen)
rtx xval = xop[1];
/* MODE of the comparison. */
- enum machine_mode mode = GET_MODE (xreg);
+ enum machine_mode mode;
/* Number of bytes to operate on. */
- int i, n_bytes = GET_MODE_SIZE (mode);
+ int i, n_bytes = GET_MODE_SIZE (GET_MODE (xreg));
/* Value (0..0xff) held in clobber register xop[2] or -1 if unknown. */
int clobber_val = -1;
+ /* Map fixed mode operands to integer operands with the same binary
+ representation. They are easier to handle in the remainder. */
+
+ if (CONST_FIXED == GET_CODE (xval))
+ {
+ xreg = avr_to_int_mode (xop[0]);
+ xval = avr_to_int_mode (xop[1]);
+ }
+
+ mode = GET_MODE (xreg);
+
gcc_assert (REG_P (xreg));
gcc_assert ((CONST_INT_P (xval) && n_bytes <= 4)
|| (const_double_operand (xval, VOIDmode) && n_bytes == 8));
@@ -4143,7 +4253,7 @@ avr_out_compare (rtx insn, rtx *xop, int *plen)
/* Comparisons == +/-1 and != +/-1 can be done similar to camparing
against 0 by ORing the bytes. This is one instruction shorter.
- Notice that DImode comparisons are always against reg:DI 18
+ Notice that 64-bit comparisons are always against reg:ALL8 18 (ACC_A)
and therefore don't use this. */
if (!test_hard_reg_class (LD_REGS, xreg)
@@ -5884,6 +5994,9 @@ avr_out_plus_1 (rtx *xop, int *plen, enum rtx_code code, int *pcc)
/* MODE of the operation. */
enum machine_mode mode = GET_MODE (xop[0]);
+ /* INT_MODE of the same size. */
+ enum machine_mode imode = int_mode_for_mode (mode);
+
/* Number of bytes to operate on. */
int i, n_bytes = GET_MODE_SIZE (mode);
@@ -5908,8 +6021,11 @@ avr_out_plus_1 (rtx *xop, int *plen, enum rtx_code code, int *pcc)
*pcc = (MINUS == code) ? CC_SET_CZN : CC_CLOBBER;
+ if (CONST_FIXED_P (xval))
+ xval = avr_to_int_mode (xval);
+
if (MINUS == code)
- xval = simplify_unary_operation (NEG, mode, xval, mode);
+ xval = simplify_unary_operation (NEG, imode, xval, imode);
op[2] = xop[3];
@@ -5920,7 +6036,7 @@ avr_out_plus_1 (rtx *xop, int *plen, enum rtx_code code, int *pcc)
{
/* We operate byte-wise on the destination. */
rtx reg8 = simplify_gen_subreg (QImode, xop[0], mode, i);
- rtx xval8 = simplify_gen_subreg (QImode, xval, mode, i);
+ rtx xval8 = simplify_gen_subreg (QImode, xval, imode, i);
/* 8-bit value to operate with this byte. */
unsigned int val8 = UINTVAL (xval8) & GET_MODE_MASK (QImode);
@@ -5941,7 +6057,7 @@ avr_out_plus_1 (rtx *xop, int *plen, enum rtx_code code, int *pcc)
&& i + 2 <= n_bytes
&& test_hard_reg_class (ADDW_REGS, reg8))
{
- rtx xval16 = simplify_gen_subreg (HImode, xval, mode, i);
+ rtx xval16 = simplify_gen_subreg (HImode, xval, imode, i);
unsigned int val16 = UINTVAL (xval16) & GET_MODE_MASK (HImode);
/* Registers R24, X, Y, Z can use ADIW/SBIW with constants < 64
@@ -6085,6 +6201,41 @@ avr_out_plus_noclobber (rtx *xop, int *plen, int *pcc)
}
+/* Output subtraction of register XOP[0] and compile time constant XOP[2]:
+
+ XOP[0] = XOP[0] - XOP[2]
+
+ This is basically the same as `avr_out_plus' except that we subtract.
+ It's needed because (minus x const) is not mapped to (plus x -const)
+ for the fixed point modes. */
+
+const char*
+avr_out_minus (rtx *xop, int *plen, int *pcc)
+{
+ rtx op[4];
+
+ if (pcc)
+ *pcc = (int) CC_SET_CZN;
+
+ if (REG_P (xop[2]))
+ return avr_asm_len ("sub %A0,%A2" CR_TAB
+ "sbc %B0,%B2", xop, plen, -2);
+
+ if (!CONST_INT_P (xop[2])
+ && !CONST_FIXED_P (xop[2]))
+ return avr_asm_len ("subi %A0,lo8(%2)" CR_TAB
+ "sbci %B0,hi8(%2)", xop, plen, -2);
+
+ op[0] = avr_to_int_mode (xop[0]);
+ op[1] = avr_to_int_mode (xop[1]);
+ op[2] = gen_int_mode (-INTVAL (avr_to_int_mode (xop[2])),
+ GET_MODE (op[0]));
+ op[3] = xop[3];
+
+ return avr_out_plus (op, plen, pcc);
+}
+
+
/* Prepare operands of adddi3_const_insn to be used with avr_out_plus_1. */
const char*
@@ -6103,6 +6254,19 @@ avr_out_plus64 (rtx addend, int *plen)
return "";
}
+
+/* Prepare operands of subdi3_const_insn to be used with avr_out_plus64. */
+
+const char*
+avr_out_minus64 (rtx subtrahend, int *plen)
+{
+ rtx xneg = avr_to_int_mode (subtrahend);
+ xneg = simplify_unary_operation (NEG, DImode, xneg, DImode);
+
+ return avr_out_plus64 (xneg, plen);
+}
+
+
/* Output bit operation (IOR, AND, XOR) with register XOP[0] and compile
time constant XOP[2]:
@@ -6442,6 +6606,349 @@ avr_rotate_bytes (rtx operands[])
return true;
}
+
+/* Outputs instructions needed for fixed point type conversion.
+ This includes converting between any fixed point type, as well
+ as converting to any integer type. Conversion between integer
+ types is not supported.
+
+ The number of instructions generated depends on the types
+ being converted and the registers assigned to them.
+
+ The number of instructions required to complete the conversion
+ is least if the registers for source and destination are overlapping
+ and are aligned at the decimal place as actual movement of data is
+ completely avoided. In some cases, the conversion may already be
+ complete without any instructions needed.
+
+ When converting to signed types from signed types, sign extension
+ is implemented.
+
+ Converting signed fractional types requires a bit shift if converting
+ to or from any unsigned fractional type because the decimal place is
+ shifted by 1 bit. When the destination is a signed fractional, the sign
+ is stored in either the carry or T bit. */
+
+const char*
+avr_out_fract (rtx insn, rtx operands[], bool intsigned, int *plen)
+{
+ int i;
+ bool sbit[2];
+ /* ilen: Length of integral part (in bytes)
+ flen: Length of fractional part (in bytes)
+ tlen: Length of operand (in bytes)
+ blen: Length of operand (in bits) */
+ int ilen[2], flen[2], tlen[2], blen[2];
+ int rdest, rsource, offset;
+ int start, end, dir;
+ bool sign_in_T = false, sign_in_Carry = false, sign_done = false;
+ bool widening_sign_extend = false;
+ int clrword = -1, lastclr = 0, clr = 0;
+ rtx xop[6];
+
+ const int dest = 0;
+ const int src = 1;
+
+ xop[dest] = operands[dest];
+ xop[src] = operands[src];
+
+ if (plen)
+ *plen = 0;
+
+ /* Determine format (integer and fractional parts)
+ of types needing conversion. */
+
+ for (i = 0; i < 2; i++)
+ {
+ enum machine_mode mode = GET_MODE (xop[i]);
+
+ tlen[i] = GET_MODE_SIZE (mode);
+ blen[i] = GET_MODE_BITSIZE (mode);
+
+ if (SCALAR_INT_MODE_P (mode))
+ {
+ sbit[i] = intsigned;
+ ilen[i] = GET_MODE_SIZE (mode);
+ flen[i] = 0;
+ }
+ else if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
+ {
+ sbit[i] = SIGNED_SCALAR_FIXED_POINT_MODE_P (mode);
+ ilen[i] = (GET_MODE_IBIT (mode) + 1) / 8;
+ flen[i] = (GET_MODE_FBIT (mode) + 1) / 8;
+ }
+ else
+ fatal_insn ("unsupported fixed-point conversion", insn);
+ }
+
+ /* Perform sign extension if source and dest are both signed,
+ and there are more integer parts in dest than in source. */
+
+ widening_sign_extend = sbit[dest] && sbit[src] && ilen[dest] > ilen[src];
+
+ rdest = REGNO (xop[dest]);
+ rsource = REGNO (xop[src]);
+ offset = flen[src] - flen[dest];
+
+ /* Position of MSB resp. sign bit. */
+
+ xop[2] = GEN_INT (blen[dest] - 1);
+ xop[3] = GEN_INT (blen[src] - 1);
+
+ /* Store the sign bit if the destination is a signed fract and the source
+ has a sign in the integer part. */
+
+ if (sbit[dest] && ilen[dest] == 0 && sbit[src] && ilen[src] > 0)
+ {
+ /* To avoid using BST and BLD if the source and destination registers
+ overlap or the source is unused after, we can use LSL to store the
+ sign bit in carry since we don't need the integral part of the source.
+ Restoring the sign from carry saves one BLD instruction below. */
+
+ if (reg_unused_after (insn, xop[src])
+ || (rdest < rsource + tlen[src]
+ && rdest + tlen[dest] > rsource))
+ {
+ avr_asm_len ("lsl %T1%t3", xop, plen, 1);
+ sign_in_Carry = true;
+ }
+ else
+ {
+ avr_asm_len ("bst %T1%T3", xop, plen, 1);
+ sign_in_T = true;
+ }
+ }
+
+ /* Pick the correct direction to shift bytes. */
+
+ if (rdest < rsource + offset)
+ {
+ dir = 1;
+ start = 0;
+ end = tlen[dest];
+ }
+ else
+ {
+ dir = -1;
+ start = tlen[dest] - 1;
+ end = -1;
+ }
+
+ /* Perform conversion by moving registers into place, clearing
+ destination registers that do not overlap with any source. */
+
+ for (i = start; i != end; i += dir)
+ {
+ int destloc = rdest + i;
+ int sourceloc = rsource + i + offset;
+
+ /* Source register location is outside range of source register,
+ so clear this byte in the dest. */
+
+ if (sourceloc < rsource
+ || sourceloc >= rsource + tlen[src])
+ {
+ if (AVR_HAVE_MOVW
+ && i + dir != end
+ && (sourceloc + dir < rsource
+ || sourceloc + dir >= rsource + tlen[src])
+ && ((dir == 1 && !(destloc % 2) && !(sourceloc % 2))
+ || (dir == -1 && (destloc % 2) && (sourceloc % 2)))
+ && clrword != -1)
+ {
+ /* Use already cleared word to clear two bytes at a time. */
+
+ int even_i = i & ~1;
+ int even_clrword = clrword & ~1;
+
+ xop[4] = GEN_INT (8 * even_i);
+ xop[5] = GEN_INT (8 * even_clrword);
+ avr_asm_len ("movw %T0%t4,%T0%t5", xop, plen, 1);
+ i += dir;
+ }
+ else
+ {
+ if (i == tlen[dest] - 1
+ && widening_sign_extend
+ && blen[src] - 1 - 8 * offset < 0)
+ {
+ /* The SBRC below that sign-extends would come
+ up with a negative bit number because the sign
+                bit is out of reach.  Also avoid some early-clobber
+ situations because of premature CLR. */
+
+ if (reg_unused_after (insn, xop[src]))
+ avr_asm_len ("lsl %T1%t3" CR_TAB
+ "sbc %T0%t2,%T0%t2", xop, plen, 2);
+ else
+ avr_asm_len ("mov __tmp_reg__,%T1%t3" CR_TAB
+ "lsl __tmp_reg__" CR_TAB
+ "sbc %T0%t2,%T0%t2", xop, plen, 3);
+ sign_done = true;
+
+ continue;
+ }
+
+ /* Do not clear the register if it is going to get
+ sign extended with a MOV later. */
+
+ if (sbit[dest] && sbit[src]
+ && i != tlen[dest] - 1
+ && i >= flen[dest])
+ {
+ continue;
+ }
+
+ xop[4] = GEN_INT (8 * i);
+ avr_asm_len ("clr %T0%t4", xop, plen, 1);
+
+ /* If the last byte was cleared too, we have a cleared
+ word we can MOVW to clear two bytes at a time. */
+
+ if (lastclr)
+ clrword = i;
+
+ clr = 1;
+ }
+ }
+ else if (destloc == sourceloc)
+ {
+ /* Source byte is already in destination: Nothing needed. */
+
+ continue;
+ }
+ else
+ {
+ /* Registers do not line up and source register location
+ is within range: Perform move, shifting with MOV or MOVW. */
+
+ if (AVR_HAVE_MOVW
+ && i + dir != end
+ && sourceloc + dir >= rsource
+ && sourceloc + dir < rsource + tlen[src]
+ && ((dir == 1 && !(destloc % 2) && !(sourceloc % 2))
+ || (dir == -1 && (destloc % 2) && (sourceloc % 2))))
+ {
+ int even_i = i & ~1;
+ int even_i_plus_offset = (i + offset) & ~1;
+
+ xop[4] = GEN_INT (8 * even_i);
+ xop[5] = GEN_INT (8 * even_i_plus_offset);
+ avr_asm_len ("movw %T0%t4,%T1%t5", xop, plen, 1);
+ i += dir;
+ }
+ else
+ {
+ xop[4] = GEN_INT (8 * i);
+ xop[5] = GEN_INT (8 * (i + offset));
+ avr_asm_len ("mov %T0%t4,%T1%t5", xop, plen, 1);
+ }
+ }
+
+ lastclr = clr;
+ clr = 0;
+ }
+
+ /* Perform sign extension if source and dest are both signed,
+ and there are more integer parts in dest than in source. */
+
+ if (widening_sign_extend)
+ {
+ if (!sign_done)
+ {
+ xop[4] = GEN_INT (blen[src] - 1 - 8 * offset);
+
+ /* Register was cleared above, so can become 0xff and extended.
+ Note: Instead of the CLR/SBRC/COM the sign extension could
+ be performed after the LSL below by means of a SBC if only
+ one byte has to be shifted left. */
+
+ avr_asm_len ("sbrc %T0%T4" CR_TAB
+ "com %T0%t2", xop, plen, 2);
+ }
+
+ /* Sign extend additional bytes by MOV and MOVW. */
+
+ start = tlen[dest] - 2;
+ end = flen[dest] + ilen[src] - 1;
+
+ for (i = start; i != end; i--)
+ {
+ if (AVR_HAVE_MOVW && i != start && i-1 != end)
+ {
+ i--;
+ xop[4] = GEN_INT (8 * i);
+ xop[5] = GEN_INT (8 * (tlen[dest] - 2));
+ avr_asm_len ("movw %T0%t4,%T0%t5", xop, plen, 1);
+ }
+ else
+ {
+ xop[4] = GEN_INT (8 * i);
+ xop[5] = GEN_INT (8 * (tlen[dest] - 1));
+ avr_asm_len ("mov %T0%t4,%T0%t5", xop, plen, 1);
+ }
+ }
+ }
+
+ /* If destination is a signed fract, and the source was not, a shift
+ by 1 bit is needed. Also restore sign from carry or T. */
+
+ if (sbit[dest] && !ilen[dest] && (!sbit[src] || ilen[src]))
+ {
+ /* We have flen[src] non-zero fractional bytes to shift.
+ Because of the right shift, handle one byte more so that the
+ LSB won't be lost. */
+
+ int nonzero = flen[src] + 1;
+
+ /* If the LSB is in the T flag and there are no fractional
+ bits, the high byte is zero and no shift needed. */
+
+ if (flen[src] == 0 && sign_in_T)
+ nonzero = 0;
+
+ start = flen[dest] - 1;
+ end = start - nonzero;
+
+ for (i = start; i > end && i >= 0; i--)
+ {
+ xop[4] = GEN_INT (8 * i);
+ if (i == start && !sign_in_Carry)
+ avr_asm_len ("lsr %T0%t4", xop, plen, 1);
+ else
+ avr_asm_len ("ror %T0%t4", xop, plen, 1);
+ }
+
+ if (sign_in_T)
+ {
+ avr_asm_len ("bld %T0%T2", xop, plen, 1);
+ }
+ }
+ else if (sbit[src] && !ilen[src] && (!sbit[dest] || ilen[dest]))
+ {
+ /* If source was a signed fract and dest was not, shift 1 bit
+ other way. */
+
+ start = flen[dest] - flen[src];
+
+ if (start < 0)
+ start = 0;
+
+ for (i = start; i < flen[dest]; i++)
+ {
+ xop[4] = GEN_INT (8 * i);
+
+ if (i == start)
+ avr_asm_len ("lsl %T0%t4", xop, plen, 1);
+ else
+ avr_asm_len ("rol %T0%t4", xop, plen, 1);
+ }
+ }
+
+ return "";
+}
+
+
/* Modifies the length assigned to instruction INSN
LEN is the initially computed length of the insn. */
@@ -6489,6 +6996,8 @@ adjust_insn_length (rtx insn, int len)
case ADJUST_LEN_OUT_PLUS: avr_out_plus (op, &len, NULL); break;
case ADJUST_LEN_PLUS64: avr_out_plus64 (op[0], &len); break;
+ case ADJUST_LEN_MINUS: avr_out_minus (op, &len, NULL); break;
+ case ADJUST_LEN_MINUS64: avr_out_minus64 (op[0], &len); break;
case ADJUST_LEN_OUT_PLUS_NOCLOBBER:
avr_out_plus_noclobber (op, &len, NULL); break;
@@ -6502,6 +7011,9 @@ adjust_insn_length (rtx insn, int len)
case ADJUST_LEN_XLOAD: avr_out_xload (insn, op, &len); break;
case ADJUST_LEN_LOAD_LPM: avr_load_lpm (insn, op, &len); break;
+ case ADJUST_LEN_SFRACT: avr_out_fract (insn, op, true, &len); break;
+ case ADJUST_LEN_UFRACT: avr_out_fract (insn, op, false, &len); break;
+
case ADJUST_LEN_TSTHI: avr_out_tsthi (insn, op, &len); break;
case ADJUST_LEN_TSTPSI: avr_out_tstpsi (insn, op, &len); break;
case ADJUST_LEN_TSTSI: avr_out_tstsi (insn, op, &len); break;
@@ -6683,6 +7195,20 @@ avr_assemble_integer (rtx x, unsigned int size, int aligned_p)
return true;
}
+ else if (CONST_FIXED_P (x))
+ {
+ unsigned n;
+
+ /* varasm fails to handle big fixed modes that don't fit in hwi. */
+
+ for (n = 0; n < size; n++)
+ {
+ rtx xn = simplify_gen_subreg (QImode, x, GET_MODE (x), n);
+ default_assemble_integer (xn, 1, aligned_p);
+ }
+
+ return true;
+ }
return default_assemble_integer (x, size, aligned_p);
}
@@ -7489,6 +8015,7 @@ avr_operand_rtx_cost (rtx x, enum machine_mode mode, enum rtx_code outer,
return 0;
case CONST_INT:
+ case CONST_FIXED:
case CONST_DOUBLE:
return COSTS_N_INSNS (GET_MODE_SIZE (mode));
@@ -7518,6 +8045,7 @@ avr_rtx_costs_1 (rtx x, int codearg, int outer_code ATTRIBUTE_UNUSED,
switch (code)
{
case CONST_INT:
+ case CONST_FIXED:
case CONST_DOUBLE:
case SYMBOL_REF:
case CONST:
@@ -8446,11 +8974,17 @@ avr_compare_pattern (rtx insn)
if (pattern
&& NONJUMP_INSN_P (insn)
&& SET_DEST (pattern) == cc0_rtx
- && GET_CODE (SET_SRC (pattern)) == COMPARE
- && DImode != GET_MODE (XEXP (SET_SRC (pattern), 0))
- && DImode != GET_MODE (XEXP (SET_SRC (pattern), 1)))
+ && GET_CODE (SET_SRC (pattern)) == COMPARE)
{
- return pattern;
+ enum machine_mode mode0 = GET_MODE (XEXP (SET_SRC (pattern), 0));
+ enum machine_mode mode1 = GET_MODE (XEXP (SET_SRC (pattern), 1));
+
+ /* The 64-bit comparisons have fixed operands ACC_A and ACC_B.
+ They must not be swapped, thus skip them. */
+
+ if ((mode0 == VOIDmode || GET_MODE_SIZE (mode0) <= 4)
+ && (mode1 == VOIDmode || GET_MODE_SIZE (mode1) <= 4))
+ return pattern;
}
return NULL_RTX;
@@ -8788,6 +9322,8 @@ avr_2word_insn_p (rtx insn)
return false;
case CODE_FOR_movqi_insn:
+ case CODE_FOR_movuqq_insn:
+ case CODE_FOR_movqq_insn:
{
rtx set = single_set (insn);
rtx src = SET_SRC (set);
@@ -8796,7 +9332,7 @@ avr_2word_insn_p (rtx insn)
/* Factor out LDS and STS from movqi_insn. */
if (MEM_P (dest)
- && (REG_P (src) || src == const0_rtx))
+ && (REG_P (src) || src == CONST0_RTX (GET_MODE (dest))))
{
return CONSTANT_ADDRESS_P (XEXP (dest, 0));
}
@@ -9021,7 +9557,7 @@ output_reload_in_const (rtx *op, rtx clobber_reg, int *len, bool clear_p)
if (NULL_RTX == clobber_reg
&& !test_hard_reg_class (LD_REGS, dest)
- && (! (CONST_INT_P (src) || CONST_DOUBLE_P (src))
+ && (! (CONST_INT_P (src) || CONST_FIXED_P (src) || CONST_DOUBLE_P (src))
|| !avr_popcount_each_byte (src, n_bytes,
(1 << 0) | (1 << 1) | (1 << 8))))
{
@@ -9048,6 +9584,7 @@ output_reload_in_const (rtx *op, rtx clobber_reg, int *len, bool clear_p)
ldreg_p = test_hard_reg_class (LD_REGS, xdest[n]);
if (!CONST_INT_P (src)
+ && !CONST_FIXED_P (src)
&& !CONST_DOUBLE_P (src))
{
static const char* const asm_code[][2] =
@@ -9239,6 +9776,7 @@ output_reload_insisf (rtx *op, rtx clobber_reg, int *len)
if (AVR_HAVE_MOVW
&& !test_hard_reg_class (LD_REGS, op[0])
&& (CONST_INT_P (op[1])
+ || CONST_FIXED_P (op[1])
|| CONST_DOUBLE_P (op[1])))
{
int len_clr, len_noclr;
@@ -10834,6 +11372,12 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg,
#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P avr_scalar_mode_supported_p
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST avr_build_builtin_va_list
+
+#undef TARGET_FIXED_POINT_SUPPORTED_P
+#define TARGET_FIXED_POINT_SUPPORTED_P hook_bool_void_true
+
#undef TARGET_ADDR_SPACE_SUBSET_P
#define TARGET_ADDR_SPACE_SUBSET_P avr_addr_space_subset_p