author     Richard Biener <rguenther@suse.de>   2013-05-03 11:09:59 +0000
committer  Richard Biener <rguenth@gcc.gnu.org> 2013-05-03 11:09:59 +0000
commit     07bfc9ece12554016cf8c39b17301ab94df6f176 (patch)
tree       7d533f06f94bafb5dd09f062d0ef17a1ab83a989
parent     7769bb64f35fea465d47288bcc8dae542257162f (diff)
double-int.h (lshift): New overload without precision and arith argument.
2013-05-03  Richard Biener  <rguenther@suse.de>

        * double-int.h (lshift): New overload without precision and
        arith argument.
        (operator *=, operator +=, operator -=): Move ...
        * double-int.c (operator *=, operator +=, operator -=): ... here
        and implement more efficiently.
        (mul_double_with_sign): Remove.
        (lshift_double): Adjust to take unsigned shift argument, push
        dispatching code to callers.
        (mul_double_wide_with_sign): Add early out for callers that
        are not interested in high parts or overflow.
        (lshift): New function.
        (lshift, rshift, alshift, arshift, llshift, lrshift): Add
        dispatch code here.
        (lrotate, rrotate): Use logical shifts.
        * expr.c (get_inner_reference): Use lshift.
        * fixed-value.c (do_fixed_divide): Likewise.
        * tree-dfa.c (get_ref_base_and_extent): Likewise.
        * tree-ssa-alias.c (indirect_ref_may_alias_decl_p): Likewise.
        (indirect_refs_may_alias_p): Likewise.
        (stmt_kills_ref_p_1): Likewise.

From-SVN: r198576
-rw-r--r--  gcc/ChangeLog         |  23
-rw-r--r--  gcc/double-int.c      | 154
-rw-r--r--  gcc/double-int.h      |  22
-rw-r--r--  gcc/expr.c            |   8
-rw-r--r--  gcc/fixed-value.c     |   6
-rw-r--r--  gcc/tree-dfa.c        |  20
-rw-r--r--  gcc/tree-ssa-alias.c  |  26
7 files changed, 151 insertions(+), 108 deletions(-)
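
The headline change is a new double_int::lshift overload that takes only a shift count, for the many call sites that shift by a non-negative amount at full HOST_BITS_PER_DOUBLE_INT precision and previously had to spell out alshift (count, HOST_BITS_PER_DOUBLE_INT). The following standalone C++ sketch models the new routine with uint64_t/int64_t standing in for HOST_WIDE_INT on a 64-bit host; the dint type is illustrative only, not GCC's double_int.

#include <cstdint>
#include <cassert>

struct dint                       /* stand-in for double_int: 128 bits as two words */
{
  uint64_t low;
  int64_t high;

  dint lshift (int count) const   /* models the new precision-free overload */
  {
    dint r;
    assert (count >= 0);
    if (count >= 128)             /* everything shifted out */
      {
        r.high = 0;
        r.low = 0;
      }
    else if (count >= 64)         /* low word moves (shifted) into the high word */
      {
        r.high = (int64_t) (low << (count - 64));
        r.low = 0;
      }
    else                          /* the two-step ">> ... >> 1" keeps count == 0 well defined */
      {
        r.high = (int64_t) (((uint64_t) high << count)
                            | (low >> (64 - count - 1) >> 1));
        r.low = low << count;
      }
    return r;
  }
};

int main ()
{
  dint a = { 0x8000000000000000ull, 0 };
  dint b = a.lshift (1);          /* bit 63 of the low word carries into the high word */
  return (b.high == 1 && b.low == 0) ? 0 : 1;
}
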
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 6894860..ad545ae 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,26 @@
+2013-05-03 Richard Biener <rguenther@suse.de>
+
+ * double-int.h (lshift): New overload without precision
+ and arith argument.
+ (operator *=, operator +=, operator -=): Move ...
+ * double-int.c (operator *=, operator +=, operator -=): ... here
+ and implement more efficiently.
+ (mul_double_with_sign): Remove.
+ (lshift_double): Adjust to take unsigned shift argument, push
+ dispatching code to callers.
+ (mul_double_wide_with_sign): Add early out for callers that
+ are not interested in high parts or overflow.
+ (lshift): New function.
+ (lshift, rshift, alshift, arshift, llshift, lrshift): Add
+ dispatch code here.
+ (lrotate, rrotate): Use logical shifts.
+ * expr.c (get_inner_reference): Use lshift.
+ * fixed-value.c (do_fixed_divide): Likewise.
+ * tree-dfa.c (get_ref_base_and_extent): Likewise.
+ * tree-ssa-alias.c (indirect_ref_may_alias_decl_p): Likewise.
+ (indirect_refs_may_alias_p): Likewise.
+ (stmt_kills_ref_p_1): Likewise.
+
2013-05-03 Vidya Praveen <vidyapraveen@arm.com>
* config/aarch64/aarch64-simd.md (simd_fabd): Correct the description.
diff --git a/gcc/double-int.c b/gcc/double-int.c
index 918ce22..b098f57 100644
--- a/gcc/double-int.c
+++ b/gcc/double-int.c
@@ -34,11 +34,6 @@ static int add_double_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
static int neg_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
-static int mul_double_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
- unsigned HOST_WIDE_INT, HOST_WIDE_INT,
- unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
- bool);
-
static int mul_double_wide_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
unsigned HOST_WIDE_INT, HOST_WIDE_INT,
unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
@@ -46,11 +41,7 @@ static int mul_double_wide_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
bool);
#define mul_double(l1,h1,l2,h2,lv,hv) \
- mul_double_with_sign (l1, h1, l2, h2, lv, hv, false)
-
-static void lshift_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
- HOST_WIDE_INT, unsigned int,
- unsigned HOST_WIDE_INT *, HOST_WIDE_INT *, bool);
+ mul_double_wide_with_sign (l1, h1, l2, h2, lv, hv, NULL, NULL, false)
static int div_and_round_double (unsigned, int, unsigned HOST_WIDE_INT,
HOST_WIDE_INT, unsigned HOST_WIDE_INT,
@@ -158,25 +149,13 @@ neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
}
}
-/* Multiply two doubleword integers with doubleword result.
+/* Multiply two doubleword integers with quadword result.
Return nonzero if the operation overflows according to UNSIGNED_P.
Each argument is given as two `HOST_WIDE_INT' pieces.
One argument is L1 and H1; the other, L2 and H2.
- The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
-
-static int
-mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
- unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
- unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
- bool unsigned_p)
-{
- unsigned HOST_WIDE_INT toplow;
- HOST_WIDE_INT tophigh;
-
- return mul_double_wide_with_sign (l1, h1, l2, h2,
- lv, hv, &toplow, &tophigh,
- unsigned_p);
-}
+ The value is stored as four `HOST_WIDE_INT' pieces in *LV and *HV,
+ *LW and *HW.
+ If lw is NULL then only the low part and no overflow is computed. */
static int
mul_double_wide_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
@@ -215,6 +194,11 @@ mul_double_wide_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
}
decode (prod, lv, hv);
+
+ /* We are not interested in the wide part nor in overflow. */
+ if (lw == NULL)
+ return 0;
+
decode (prod + 4, lw, hw);
/* Unsigned overflow is immediate. */
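
The early out above lets callers that only want the low 128 bits of the product, such as the redefined mul_double macro and operator *, pass NULL for LW/HW and skip both the upper half of the product and the overflow check. A standalone sketch of that calling convention follows (64x64 -> 128 with an optional high part; not GCC code, the mul_wide helper is hypothetical, and it assumes a compiler providing __int128):

#include <cstdint>
#include <cstdio>

/* Multiply two 64-bit values.  *lo always gets the low 64 bits of the
   product; if hi is non-null the high 64 bits are computed as well and the
   return value reports whether the product overflowed 64 bits.  */
static bool
mul_wide (uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
  unsigned __int128 prod = (unsigned __int128) a * b;
  *lo = (uint64_t) prod;
  if (hi == nullptr)              /* caller wants neither the high part nor overflow */
    return false;
  *hi = (uint64_t) (prod >> 64);
  return *hi != 0;
}

int main ()
{
  uint64_t lo, hi;
  mul_wide (1ull << 20, 1ull << 20, &lo, nullptr);    /* cheap path: low part only */
  bool ovf = mul_wide (UINT64_MAX, 3, &lo, &hi);      /* full path: detects overflow */
  printf ("lo=%llu hi=%llu ovf=%d\n",
          (unsigned long long) lo, (unsigned long long) hi, (int) ovf);
  return 0;
}
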
@@ -306,17 +290,11 @@ rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
static void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
- HOST_WIDE_INT count, unsigned int prec,
- unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, bool arith)
+ unsigned HOST_WIDE_INT count, unsigned int prec,
+ unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
unsigned HOST_WIDE_INT signmask;
- if (count < 0)
- {
- rshift_double (l1, h1, absu_hwi (count), prec, lv, hv, arith);
- return;
- }
-
if (SHIFT_COUNT_TRUNCATED)
count %= prec;
@@ -832,6 +810,15 @@ double_int::operator * (double_int b) const
return ret;
}
+/* Multiplies *this with B and returns a reference to *this. */
+
+double_int &
+double_int::operator *= (double_int b)
+{
+ mul_double (low, high, b.low, b.high, &low, &high);
+ return *this;
+}
+
/* Returns A * B. If the operation overflows according to UNSIGNED_P,
*OVERFLOW is set to nonzero. */
@@ -839,9 +826,10 @@ double_int
double_int::mul_with_sign (double_int b, bool unsigned_p, bool *overflow) const
{
const double_int &a = *this;
- double_int ret;
- *overflow = mul_double_with_sign (a.low, a.high, b.low, b.high,
- &ret.low, &ret.high, unsigned_p);
+ double_int ret, tem;
+ *overflow = mul_double_wide_with_sign (a.low, a.high, b.low, b.high,
+ &ret.low, &ret.high,
+ &tem.low, &tem.high, unsigned_p);
return ret;
}
@@ -869,6 +857,16 @@ double_int::operator + (double_int b) const
return ret;
}
+/* Adds B to *this and returns a reference to *this. */
+
+double_int &
+double_int::operator += (double_int b)
+{
+ add_double (low, high, b.low, b.high, &low, &high);
+ return *this;
+}
+
+
/* Returns A + B. If the operation overflows according to UNSIGNED_P,
*OVERFLOW is set to nonzero. */
@@ -894,6 +892,17 @@ double_int::operator - (double_int b) const
return ret;
}
+/* Subtracts B from *this and returns a reference to *this. */
+
+double_int &
+double_int::operator -= (double_int b)
+{
+ neg_double (b.low, b.high, &b.low, &b.high);
+ add_double (low, high, b.low, b.high, &low, &high);
+ return *this;
+}
+
+
/* Returns A - B. If the operation overflows via inconsistent sign bits,
*OVERFLOW is set to nonzero. */
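
With these hunks the compound assignments no longer go through the header wrappers of the form *this = *this op b; they update low/high in place via add_double, neg_double and mul_double. A standalone sketch of the difference for += (the dint type is illustrative and ignores the signed-overflow bookkeeping the real add_double performs):

#include <cstdint>

struct dint
{
  uint64_t low;
  int64_t high;

  dint operator+ (dint b) const
  {
    dint r;
    r.low = low + b.low;
    r.high = high + b.high + (r.low < low);   /* carry out of the low word */
    return r;
  }

  /* Old header version:   *this = *this + b;   (builds and copies a temporary)
     New in-place version: write the members directly.  */
  dint &operator+= (dint b)
  {
    uint64_t l = low + b.low;
    high += b.high + (l < low);
    low = l;
    return *this;
  }
};

int main ()
{
  dint a = { ~0ull, 0 };
  dint b = { 1, 0 };
  a += b;                       /* carries: a is now 2^64, i.e. high == 1, low == 0 */
  return (a.high == 1 && a.low == 0) ? 0 : 1;
}
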
@@ -1076,6 +1085,37 @@ double_int::trailing_zeros () const
return bits;
}
+/* Shift A left by COUNT places. */
+
+double_int
+double_int::lshift (HOST_WIDE_INT count) const
+{
+ double_int ret;
+
+ gcc_checking_assert (count >= 0);
+
+ if (count >= HOST_BITS_PER_DOUBLE_INT)
+ {
+ /* Shifting by the host word size is undefined according to the
+ ANSI standard, so we must handle this as a special case. */
+ ret.high = 0;
+ ret.low = 0;
+ }
+ else if (count >= HOST_BITS_PER_WIDE_INT)
+ {
+ ret.high = low << (count - HOST_BITS_PER_WIDE_INT);
+ ret.low = 0;
+ }
+ else
+ {
+ ret.high = (((unsigned HOST_WIDE_INT) high << count)
+ | (low >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
+ ret.low = low << count;
+ }
+
+ return ret;
+}
+
/* Shift A left by COUNT places keeping only PREC bits of result. Shift
right if COUNT is negative. ARITH true specifies arithmetic shifting;
otherwise use logical shift. */
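
The one subtle line in the new function is the contribution of the low word to the high word: low >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1 rather than the obvious low >> (HOST_BITS_PER_WIDE_INT - count). The obvious form would shift a HOST_WIDE_INT by the full word width when count is 0, which C leaves undefined; splitting it into two shifts keeps both counts in range and still yields 0 for count == 0. A standalone check, with 64-bit words standing in for HOST_WIDE_INT:

#include <cstdint>
#include <cassert>

int main ()
{
  uint64_t low = 0x0123456789abcdefull;
  for (int count = 0; count < 64; ++count)
    {
      uint64_t safe = low >> (64 - count - 1) >> 1;            /* always defined */
      uint64_t want = count == 0 ? 0 : low >> (64 - count);    /* naive form, UB at 0 */
      assert (safe == want);
    }
  return 0;
}
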
@@ -1083,9 +1123,11 @@ double_int::trailing_zeros () const
double_int
double_int::lshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const
{
- const double_int &a = *this;
double_int ret;
- lshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith);
+ if (count > 0)
+ lshift_double (low, high, count, prec, &ret.low, &ret.high);
+ else
+ rshift_double (low, high, absu_hwi (count), prec, &ret.low, &ret.high, arith);
return ret;
}
@@ -1096,9 +1138,11 @@ double_int::lshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const
double_int
double_int::rshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const
{
- const double_int &a = *this;
double_int ret;
- lshift_double (a.low, a.high, -count, prec, &ret.low, &ret.high, arith);
+ if (count > 0)
+ rshift_double (low, high, count, prec, &ret.low, &ret.high, arith);
+ else
+ lshift_double (low, high, absu_hwi (count), prec, &ret.low, &ret.high);
return ret;
}
@@ -1109,7 +1153,10 @@ double_int
double_int::alshift (HOST_WIDE_INT count, unsigned int prec) const
{
double_int r;
- lshift_double (low, high, count, prec, &r.low, &r.high, true);
+ if (count > 0)
+ lshift_double (low, high, count, prec, &r.low, &r.high);
+ else
+ rshift_double (low, high, absu_hwi (count), prec, &r.low, &r.high, true);
return r;
}
@@ -1120,7 +1167,10 @@ double_int
double_int::arshift (HOST_WIDE_INT count, unsigned int prec) const
{
double_int r;
- lshift_double (low, high, -count, prec, &r.low, &r.high, true);
+ if (count > 0)
+ rshift_double (low, high, count, prec, &r.low, &r.high, true);
+ else
+ lshift_double (low, high, absu_hwi (count), prec, &r.low, &r.high);
return r;
}
@@ -1131,7 +1181,10 @@ double_int
double_int::llshift (HOST_WIDE_INT count, unsigned int prec) const
{
double_int r;
- lshift_double (low, high, count, prec, &r.low, &r.high, false);
+ if (count > 0)
+ lshift_double (low, high, count, prec, &r.low, &r.high);
+ else
+ rshift_double (low, high, absu_hwi (count), prec, &r.low, &r.high, false);
return r;
}
@@ -1142,7 +1195,10 @@ double_int
double_int::lrshift (HOST_WIDE_INT count, unsigned int prec) const
{
double_int r;
- lshift_double (low, high, -count, prec, &r.low, &r.high, false);
+ if (count > 0)
+ rshift_double (low, high, count, prec, &r.low, &r.high, false);
+ else
+ lshift_double (low, high, absu_hwi (count), prec, &r.low, &r.high);
return r;
}
@@ -1158,8 +1214,8 @@ double_int::lrotate (HOST_WIDE_INT count, unsigned int prec) const
if (count < 0)
count += prec;
- t1 = this->lshift (count, prec, false);
- t2 = this->rshift (prec - count, prec, false);
+ t1 = this->llshift (count, prec);
+ t2 = this->lrshift (prec - count, prec);
return t1 | t2;
}
@@ -1176,8 +1232,8 @@ double_int::rrotate (HOST_WIDE_INT count, unsigned int prec) const
if (count < 0)
count += prec;
- t1 = this->rshift (count, prec, false);
- t2 = this->lshift (prec - count, prec, false);
+ t1 = this->lrshift (count, prec);
+ t2 = this->llshift (prec - count, prec);
return t1 | t2;
}
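
lrotate and rrotate now name the logical-shift primitives directly instead of calling lshift/rshift with arith == false. The composition is the usual rotate-by-shifts identity, and the right shift has to be logical: an arithmetic shift of a value whose top bit is set would fill the vacated bits with ones and corrupt the result. A sketch on a plain 64-bit word (the rotl64 helper is hypothetical and assumes 0 < n < 64; it is not double_int):

#include <cstdint>
#include <cassert>

static uint64_t
rotl64 (uint64_t x, unsigned n)     /* rotl (x, n) = (x << n) | (x >> (64 - n)) */
{
  return (x << n) | (x >> (64 - n));
}

int main ()
{
  uint64_t x = 0x8000000000000001ull;          /* negative if read as signed */
  assert (rotl64 (x, 4) == 0x0000000000000018ull);
  return 0;
}
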
diff --git a/gcc/double-int.h b/gcc/double-int.h
index 5c425a8..39929d2 100644
--- a/gcc/double-int.h
+++ b/gcc/double-int.h
@@ -128,6 +128,7 @@ struct double_int
double_int operator ^ (double_int) const;
double_int and_not (double_int) const;
+ double_int lshift (HOST_WIDE_INT count) const;
double_int lshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const;
double_int rshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const;
double_int alshift (HOST_WIDE_INT count, unsigned int prec) const;
@@ -258,27 +259,6 @@ double_int::operator -- ()
}
inline double_int &
-double_int::operator *= (double_int b)
-{
- *this = *this * b;
- return *this;
-}
-
-inline double_int &
-double_int::operator += (double_int b)
-{
- *this = *this + b;
- return *this;
-}
-
-inline double_int &
-double_int::operator -= (double_int b)
-{
- *this = *this - b;
- return *this;
-}
-
-inline double_int &
double_int::operator &= (double_int b)
{
*this = *this & b;
diff --git a/gcc/expr.c b/gcc/expr.c
index e3fb0b6..acf282e 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -6704,9 +6704,8 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
if (!integer_zerop (off))
{
double_int boff, coff = mem_ref_offset (exp);
- boff = coff.alshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ boff = coff.lshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT));
bit_offset += boff;
}
exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
@@ -6732,8 +6731,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
{
double_int tem = tree_to_double_int (offset);
tem = tem.sext (TYPE_PRECISION (sizetype));
- tem = tem.alshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ tem = tem.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
tem += bit_offset;
if (tem.fits_shwi ())
{
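
The converted call sites in expr.c, tree-dfa.c and tree-ssa-alias.c all use the same idiom: turn a byte offset into a bit offset by shifting left by log2 (BITS_PER_UNIT), special-casing the ubiquitous 8-bit byte so no log2 computation is needed. Since the count is non-negative and no truncation is wanted, the new precision-free lshift is sufficient. A standalone model of the idiom (uint64_t instead of double_int; exact_log2_u is a hypothetical stand-in for GCC's exact_log2):

#include <cstdint>
#include <cassert>

static int
exact_log2_u (uint64_t x)           /* x must be a power of two */
{
  assert (x != 0 && (x & (x - 1)) == 0);
  int n = 0;
  while (x > 1)
    {
      x >>= 1;
      n++;
    }
  return n;
}

int main ()
{
  const uint64_t bits_per_unit = 8;            /* BITS_PER_UNIT on typical targets */
  uint64_t byte_offset = 24;
  uint64_t bit_offset
    = byte_offset << (bits_per_unit == 8 ? 3 : exact_log2_u (bits_per_unit));
  return bit_offset == 192 ? 0 : 1;
}
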
diff --git a/gcc/fixed-value.c b/gcc/fixed-value.c
index 18ce47e..8ba7876 100644
--- a/gcc/fixed-value.c
+++ b/gcc/fixed-value.c
@@ -569,14 +569,14 @@ do_fixed_divide (FIXED_VALUE_TYPE *f, const FIXED_VALUE_TYPE *a,
int leftmost_mod = (mod.high < 0);
/* Shift left mod by 1 bit. */
- mod = mod.llshift (1, HOST_BITS_PER_DOUBLE_INT);
+ mod = mod.lshift (1);
/* Test the leftmost bit of s to add to mod. */
if (s.high < 0)
mod.low += 1;
/* Shift left quo_s by 1 bit. */
- quo_s = quo_s.llshift (1, HOST_BITS_PER_DOUBLE_INT);
+ quo_s = quo_s.lshift (1);
/* Try to calculate (mod - pos_b). */
temp = mod - pos_b;
@@ -588,7 +588,7 @@ do_fixed_divide (FIXED_VALUE_TYPE *f, const FIXED_VALUE_TYPE *a,
}
/* Shift left s by 1 bit. */
- s = s.llshift (1, HOST_BITS_PER_DOUBLE_INT);
+ s = s.lshift (1);
}
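
For context, do_fixed_divide is a bit-at-a-time (shift-and-subtract) division loop, so these left shifts by one bit run once per result bit; at full 128-bit precision llshift (1, HOST_BITS_PER_DOUBLE_INT) and the new lshift (1) are the same operation. A much-simplified standalone sketch of that loop shape (plain unsigned 64-bit division with a hypothetical div_bitwise helper; the real routine also handles fixed-point scaling and signs):

#include <cstdint>
#include <cassert>

/* Shift-and-subtract division: each iteration shifts the remainder left one
   bit, brings down the next dividend bit, shifts the quotient in lock step,
   and subtracts the divisor when it fits.  */
static uint64_t
div_bitwise (uint64_t a, uint64_t b, uint64_t *rem)
{
  uint64_t quo = 0, mod = 0;
  for (int i = 63; i >= 0; --i)
    {
      mod = (mod << 1) | ((a >> i) & 1);
      quo <<= 1;
      if (mod >= b)
        {
          mod -= b;
          quo |= 1;
        }
    }
  *rem = mod;
  return quo;
}

int main ()
{
  uint64_t r;
  assert (div_bitwise (1000, 7, &r) == 142 && r == 6);
  return 0;
}
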
diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c
index 23fae4f..57aae95 100644
--- a/gcc/tree-dfa.c
+++ b/gcc/tree-dfa.c
@@ -433,9 +433,8 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
if (this_offset && TREE_CODE (this_offset) == INTEGER_CST)
{
double_int doffset = tree_to_double_int (this_offset);
- doffset = doffset.alshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ doffset = doffset.lshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT));
doffset += tree_to_double_int (DECL_FIELD_BIT_OFFSET (field));
bit_offset = bit_offset + doffset;
@@ -501,9 +500,8 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
= (TREE_INT_CST (index) - TREE_INT_CST (low_bound))
.sext (TYPE_PRECISION (TREE_TYPE (index)));
doffset *= tree_to_double_int (unit_size);
- doffset = doffset.alshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ doffset = doffset.lshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT));
bit_offset = bit_offset + doffset;
/* An array ref with a constant index up in the structure
@@ -552,9 +550,8 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
else
{
double_int off = mem_ref_offset (exp);
- off = off.alshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ off = off.lshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT));
off = off + bit_offset;
if (off.fits_shwi ())
{
@@ -583,9 +580,8 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
else
{
double_int off = mem_ref_offset (exp);
- off = off.alshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ off = off.lshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT));
off += bit_offset;
if (off.fits_shwi ())
{
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index 70e9e00..971a347 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -882,9 +882,7 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
moff = mem_ref_offset (base1);
- moff = moff.alshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
if (moff.is_negative ())
offset2p += (-moff).low;
else
@@ -960,9 +958,7 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
|| TREE_CODE (dbase2) == TARGET_MEM_REF)
{
double_int moff = mem_ref_offset (dbase2);
- moff = moff.alshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
if (moff.is_negative ())
doffset1 -= (-moff).low;
else
@@ -1056,17 +1052,13 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
moff = mem_ref_offset (base1);
- moff = moff.alshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
if (moff.is_negative ())
offset2 += (-moff).low;
else
offset1 += moff.low;
moff = mem_ref_offset (base2);
- moff = moff.alshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
if (moff.is_negative ())
offset1 += (-moff).low;
else
@@ -2014,14 +2006,12 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
TREE_OPERAND (ref->base, 0)))
{
double_int off1 = mem_ref_offset (base);
- off1 = off1.alshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ off1 = off1.lshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT));
off1 = off1 + double_int::from_shwi (offset);
double_int off2 = mem_ref_offset (ref->base);
- off2 = off2.alshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ off2 = off2.lshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT));
off2 = off2 + double_int::from_shwi (ref_offset);
if (off1.fits_shwi () && off2.fits_shwi ())
{