-rw-r--r--  gcc/rust/Make-lang.in                            |   1
-rw-r--r--  gcc/rust/ast/rust-desugar-for-loops.cc           | 204
-rw-r--r--  gcc/rust/ast/rust-desugar-for-loops.h            | 108
-rw-r--r--  gcc/rust/hir/rust-ast-lower-expr.cc              |   2
-rw-r--r--  gcc/testsuite/rust/compile/for-loop1.rs          | 543
-rw-r--r--  gcc/testsuite/rust/compile/for-loop2.rs          | 545
-rw-r--r--  gcc/testsuite/rust/compile/nr2/exclude           |   2
-rw-r--r--  gcc/testsuite/rust/execute/torture/for-loop1.rs  | 545
-rw-r--r--  gcc/testsuite/rust/execute/torture/for-loop2.rs  | 544
9 files changed, 2493 insertions(+), 1 deletion(-)
diff --git a/gcc/rust/Make-lang.in b/gcc/rust/Make-lang.in
index dc03a72..5ddad25 100644
--- a/gcc/rust/Make-lang.in
+++ b/gcc/rust/Make-lang.in
@@ -235,6 +235,7 @@ GRS_OBJS = \
rust/rust-expand-format-args.o \
rust/rust-lang-item.o \
rust/rust-collect-lang-items.o \
+ rust/rust-desugar-for-loops.o \
$(END)
# removed object files from here
diff --git a/gcc/rust/ast/rust-desugar-for-loops.cc b/gcc/rust/ast/rust-desugar-for-loops.cc
new file mode 100644
index 0000000..5e5cbbc
--- /dev/null
+++ b/gcc/rust/ast/rust-desugar-for-loops.cc
@@ -0,0 +1,204 @@
+// Copyright (C) 2025 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-desugar-for-loops.h"
+#include "rust-ast-visitor.h"
+#include "rust-ast.h"
+#include "rust-hir-map.h"
+#include "rust-path.h"
+#include "rust-pattern.h"
+#include "rust-stmt.h"
+#include "rust-expr.h"
+#include "rust-ast-builder.h"
+
+namespace Rust {
+namespace AST {
+
+DesugarForLoops::DesugarForLoops () {}
+
+void
+DesugarForLoops::go (AST::Crate &crate)
+{
+ DefaultASTVisitor::visit (crate);
+}
+
+static void
+replace_for_loop (std::unique_ptr<Expr> &for_loop,
+ std::unique_ptr<Expr> &&expanded)
+{
+ for_loop = std::move (expanded);
+}
+
+MatchArm
+DesugarForLoops::DesugarCtx::make_match_arm (std::unique_ptr<Pattern> &&path)
+{
+ auto patterns = std::vector<std::unique_ptr<Pattern>> ();
+ patterns.emplace_back (std::move (path));
+
+ return MatchArm (std::move (patterns), loc);
+}
+
+MatchCase
+DesugarForLoops::DesugarCtx::make_break_arm ()
+{
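+  // Build the `None => break` arm: once the iterator is exhausted, the
+  // desugared loop terminates.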
+ auto arm = make_match_arm (std::unique_ptr<Pattern> (new PathInExpression (
+ builder.path_in_expression (LangItem::Kind::OPTION_NONE))));
+
+ auto break_expr = std::unique_ptr<Expr> (
+ new BreakExpr (Lifetime::error (), nullptr, {}, loc));
+
+ return MatchCase (std::move (arm), std::move (break_expr));
+}
+
+MatchCase
+DesugarForLoops::DesugarCtx::make_continue_arm ()
+{
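+  // Build the `Some(val) => __next = val` arm: destructure the payload of
+  // `Some` into the temporary `#val`, then assign it to the `#__next`
+  // binding declared just before the match.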
+ auto val = builder.identifier_pattern (DesugarCtx::continue_pattern_id);
+
+ auto patterns = std::vector<std::unique_ptr<Pattern>> ();
+ patterns.emplace_back (std::move (val));
+
+ auto pattern_item = std::unique_ptr<TupleStructItems> (
+ new TupleStructItemsNoRange (std::move (patterns)));
+ auto pattern = std::unique_ptr<Pattern> (new TupleStructPattern (
+ builder.path_in_expression (LangItem::Kind::OPTION_SOME),
+ std::move (pattern_item)));
+
+ auto val_arm = make_match_arm (std::move (pattern));
+
+ auto next = builder.identifier (DesugarCtx::next_value_id);
+
+ auto assignment = std::unique_ptr<Expr> (
+ new AssignmentExpr (std::move (next),
+ builder.identifier (DesugarCtx::continue_pattern_id),
+ {}, loc));
+
+ return MatchCase (std::move (val_arm), std::move (assignment));
+}
+
+std::unique_ptr<Stmt>
+DesugarForLoops::DesugarCtx::statementify (std::unique_ptr<Expr> &&expr)
+{
+ return std::unique_ptr<Stmt> (new ExprStmt (std::move (expr), loc, true));
+}
+
+std::unique_ptr<Expr>
+DesugarForLoops::desugar (AST::ForLoopExpr &expr)
+{
+ auto ctx = DesugarCtx (expr.get_locus ());
+
+ auto into_iter = std::make_unique<PathInExpression> (
+ ctx.builder.path_in_expression (LangItem::Kind::INTOITER_INTOITER));
+ auto next = std::make_unique<PathInExpression> (
+ ctx.builder.path_in_expression (LangItem::Kind::ITERATOR_NEXT));
+
+ // IntoIterator::into_iter(<head>)
+ auto into_iter_call
+ = ctx.builder.call (std::move (into_iter),
+ expr.get_iterator_expr ().clone_expr ());
+
+ // Iterator::next(iter)
+ auto next_call = ctx.builder.call (
+ std::move (next),
+ ctx.builder.ref (ctx.builder.identifier (DesugarCtx::iter_id), true));
+
+ // None => break,
+ auto break_arm = ctx.make_break_arm ();
+  // Some(val) => __next = val,
+ auto continue_arm = ctx.make_continue_arm ();
+
+ // match <next_call> {
+ // <continue_arm>
+ // <break_arm>
+ // }
+ auto match_next
+ = ctx.builder.match (std::move (next_call),
+ {std::move (continue_arm), std::move (break_arm)});
+
+ // let mut __next;
+ auto let_next = ctx.builder.let (
+ ctx.builder.identifier_pattern (DesugarCtx::next_value_id, true));
+ // let <pattern> = __next;
+ auto let_pat
+ = ctx.builder.let (expr.get_pattern ().clone_pattern (), nullptr,
+ ctx.builder.identifier (DesugarCtx::next_value_id));
+
+ auto loop_stmts = std::vector<std::unique_ptr<Stmt>> ();
+ loop_stmts.emplace_back (std::move (let_next));
+ loop_stmts.emplace_back (ctx.statementify (std::move (match_next)));
+ loop_stmts.emplace_back (std::move (let_pat));
+ loop_stmts.emplace_back (
+ ctx.statementify (expr.get_loop_block ().clone_expr ()));
+
+ // loop {
+ // <let_next>;
+ // <match_next>;
+ // <let_pat>;
+ //
+ // <body>;
+ // }
+ auto loop = ctx.builder.loop (std::move (loop_stmts));
+
+ auto mut_iter_pattern
+ = ctx.builder.identifier_pattern (DesugarCtx::iter_id, true);
+ auto match_iter
+ = ctx.builder.match (std::move (into_iter_call),
+ {ctx.builder.match_case (std::move (mut_iter_pattern),
+ std::move (loop))});
+
+ auto let_result
+ = ctx.builder.let (ctx.builder.identifier_pattern (DesugarCtx::result_id),
+ nullptr, std::move (match_iter));
+ auto result_return = ctx.builder.identifier (DesugarCtx::result_id);
+
+ return ctx.builder.block (std::move (let_result), std::move (result_return));
+}
+
+void
+DesugarForLoops::maybe_desugar_expr (std::unique_ptr<Expr> &expr)
+{
+ if (expr->get_expr_kind () == AST::Expr::Kind::Loop)
+ {
+ auto &loop = static_cast<AST::BaseLoopExpr &> (*expr);
+
+ if (loop.get_loop_kind () == AST::BaseLoopExpr::Kind::For)
+ {
+ auto &for_loop = static_cast<AST::ForLoopExpr &> (loop);
+
+ auto desugared = desugar (for_loop);
+
+ replace_for_loop (expr, std::move (desugared));
+ }
+ }
+}
+
+void
+DesugarForLoops::visit (AST::BlockExpr &block)
+{
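+  // Desugar any for-loop found as an expression statement or as the tail
+  // expression of this block, then recurse: DefaultASTVisitor::visit walks
+  // into the nodes we just created, so nested for-loops are desugared as
+  // well.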
+ for (auto &stmt : block.get_statements ())
+ if (stmt->get_stmt_kind () == AST::Stmt::Kind::Expr)
+ maybe_desugar_expr (static_cast<AST::ExprStmt &> (*stmt).get_expr_ptr ());
+
+ if (block.has_tail_expr ())
+ maybe_desugar_expr (block.get_tail_expr_ptr ());
+
+ DefaultASTVisitor::visit (block);
+}
+
+} // namespace AST
+} // namespace Rust
diff --git a/gcc/rust/ast/rust-desugar-for-loops.h b/gcc/rust/ast/rust-desugar-for-loops.h
new file mode 100644
index 0000000..7beb692
--- /dev/null
+++ b/gcc/rust/ast/rust-desugar-for-loops.h
@@ -0,0 +1,108 @@
+// Copyright (C) 2025 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_DESUGAR_FOR_LOOPS_H
+#define RUST_DESUGAR_FOR_LOOPS_H
+
+#include "rust-ast-builder.h"
+#include "rust-ast-visitor.h"
+#include "rust-expr.h"
+
+namespace Rust {
+namespace AST {
+
+// Desugar for-loops into a set of other AST nodes. The desugar is of the
+// following form:
+//
+// ```
+// for <pat> in <head> <body>
+// ```
+//
+// becomes:
+//
+// ```
+// {
+// let result = match ::std::iter::IntoIterator::into_iter(<head>) {
+// mut iter => {
+// loop {
+// let mut __next;
+// match ::std::iter::Iterator::next(&mut iter) {
+// ::std::option::Option::Some(val) => __next = val,
+// ::std::option::Option::None => break
+// };
+// let <pat> = __next;
+//
+// <body>;
+// }
+// }
+// };
+// result
+// }
+// ```
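+//
+// As a concrete illustration (the names `i`, `0..10` and `f` here are ours,
+// not the compiler's), `for i in 0..10 { f(i); }` becomes roughly:
+//
+// ```
+// {
+//     let result = match ::std::iter::IntoIterator::into_iter(0..10) {
+//         mut iter => {
+//             loop {
+//                 let mut __next;
+//                 match ::std::iter::Iterator::next(&mut iter) {
+//                     ::std::option::Option::Some(val) => __next = val,
+//                     ::std::option::Option::None => break
+//                 };
+//                 let i = __next;
+//
+//                 f(i);
+//             }
+//         }
+//     };
+//     result
+// }
+// ```
+//
+// Note that the bindings actually generated by this pass use `#`-prefixed
+// names (`#iter`, `#__next`, `#val`, `#result`), which cannot collide with
+// user-written identifiers since `#` cannot appear in a Rust identifier.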
+//
+// NOTE: In a perfect world, this would be an immutable visitor which would take
+// ownership of the AST node and return a new one, instead of mutating this one
+// in place. Nevertheless, this isn't Rust, and doing immutable visitors in C++
+// sucks, and the world isn't perfect, so we are impure and sad.
+//
+// NOTE: This class could eventually be removed in favor of
+// an HIR desugar. This would avoid mutating the AST and would be cleaner.
+// However, it requires multiple changes in the way we do typechecking and name
+// resolution, as this desugar creates new bindings. Because of this, these new
+// bindings need to be inserted into the name-resolution context outside of the
+// name resolution pass, which is difficult. Those bindings are needed because
+// of the way the typechecker is currently structured, where it will fetch name
+// resolution information in order to typecheck paths - which technically isn't
+// necessary.
+class DesugarForLoops : public DefaultASTVisitor
+{
+ using DefaultASTVisitor::visit;
+
+public:
+ DesugarForLoops ();
+ void go (AST::Crate &);
+
+private:
+ struct DesugarCtx
+ {
+ DesugarCtx (location_t loc) : builder (Builder (loc)), loc (loc) {}
+
+ Builder builder;
+ location_t loc;
+
+ MatchArm make_match_arm (std::unique_ptr<Pattern> &&pattern);
+ MatchCase make_break_arm ();
+ MatchCase make_continue_arm ();
+ std::unique_ptr<Stmt> statementify (std::unique_ptr<Expr> &&expr);
+
+ constexpr static const char *continue_pattern_id = "#val";
+ constexpr static const char *next_value_id = "#__next";
+ constexpr static const char *iter_id = "#iter";
+ constexpr static const char *result_id = "#result";
+ };
+
+ std::unique_ptr<Expr> desugar (AST::ForLoopExpr &expr);
+ void maybe_desugar_expr (std::unique_ptr<Expr> &expr);
+
+ void visit (AST::BlockExpr &) override;
+};
+
+} // namespace AST
+} // namespace Rust
+
+#endif // ! RUST_DESUGAR_FOR_LOOPS_H
diff --git a/gcc/rust/hir/rust-ast-lower-expr.cc b/gcc/rust/hir/rust-ast-lower-expr.cc
index 7ccb251..4603bfc 100644
--- a/gcc/rust/hir/rust-ast-lower-expr.cc
+++ b/gcc/rust/hir/rust-ast-lower-expr.cc
@@ -591,7 +591,7 @@ ASTLoweringExpr::visit (AST::WhileLoopExpr &expr)
void
ASTLoweringExpr::visit (AST::ForLoopExpr &expr)
{
- translated = ASTLoweringExprWithBlock::translate (expr, &terminated);
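+  // For-loops are desugared into `loop` + `match` by the new AST pass
+  // (rust-desugar-for-loops.cc) before lowering, so this visitor should
+  // never encounter one.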
+ rust_unreachable ();
}
void
diff --git a/gcc/testsuite/rust/compile/for-loop1.rs b/gcc/testsuite/rust/compile/for-loop1.rs
new file mode 100644
index 0000000..1023ecd
--- /dev/null
+++ b/gcc/testsuite/rust/compile/for-loop1.rs
@@ -0,0 +1,543 @@
+// { dg-output "loop\r*\nloop\r*\n" }
+#![feature(intrinsics)]
+
+pub use option::Option::{self, None, Some};
+pub use result::Result::{self, Err, Ok};
+
+extern "C" {
+ fn printf(s: *const i8, ...);
+ fn puts(s: *const i8);
+}
+
+mod option {
+ pub enum Option<T> {
+ #[lang = "None"]
+ None,
+ #[lang = "Some"]
+ Some(T),
+ }
+}
+
+mod result {
+ enum Result<T, E> {
+ Ok(T),
+ Err(E),
+ }
+}
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "clone"]
+pub trait Clone: Sized {
+ fn clone(&self) -> Self;
+
+ fn clone_from(&mut self, source: &Self) {
+ *self = source.clone()
+ }
+}
+
+mod impls {
+ use super::Clone;
+
+ macro_rules! impl_clone {
+ ($($t:ty)*) => {
+ $(
+ impl Clone for $t {
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+ )*
+ }
+ }
+
+ impl_clone! {
+ usize u8 u16 u32 u64 // u128
+ isize i8 i16 i32 i64 // i128
+ f32 f64
+ bool char
+ }
+}
+
+#[lang = "copy"]
+pub trait Copy: Clone {
+ // Empty.
+}
+
+mod copy_impls {
+ use super::Copy;
+
+ macro_rules! impl_copy {
+ ($($t:ty)*) => {
+ $(
+ impl Copy for $t {}
+ )*
+ }
+ }
+
+ impl_copy! {
+ usize u8 u16 u32 u64 // u128
+ isize i8 i16 i32 i64 // i128
+ f32 f64
+ bool char
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);
+ pub fn wrapping_add<T>(a: T, b: T) -> T;
+ pub fn wrapping_sub<T>(a: T, b: T) -> T;
+ pub fn rotate_left<T>(a: T, b: T) -> T;
+ pub fn rotate_right<T>(a: T, b: T) -> T;
+ pub fn offset<T>(ptr: *const T, count: isize) -> *const T;
+ pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ pub fn move_val_init<T>(dst: *mut T, src: T);
+ pub fn uninit<T>() -> T;
+ }
+}
+
+mod ptr {
+ #[lang = "const_ptr"]
+ impl<T> *const T {
+ pub unsafe fn offset(self, count: isize) -> *const T {
+ intrinsics::offset(self, count)
+ }
+ }
+
+ #[lang = "mut_ptr"]
+ impl<T> *mut T {
+ pub unsafe fn offset(self, count: isize) -> *mut T {
+ intrinsics::offset(self, count) as *mut T
+ }
+ }
+
+ pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
+ let x = x as *mut u8;
+ let y = y as *mut u8;
+ let len = mem::size_of::<T>() * count;
+ swap_nonoverlapping_bytes(x, y, len)
+ }
+
+ pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+ // For types smaller than the block optimization below,
+ // just swap directly to avoid pessimizing codegen.
+ if mem::size_of::<T>() < 32 {
+ let z = read(x);
+ intrinsics::copy_nonoverlapping(y, x, 1);
+ write(y, z);
+ } else {
+ swap_nonoverlapping(x, y, 1);
+ }
+ }
+
+ pub unsafe fn write<T>(dst: *mut T, src: T) {
+ intrinsics::move_val_init(&mut *dst, src)
+ }
+
+ pub unsafe fn read<T>(src: *const T) -> T {
+ let mut tmp: T = mem::uninitialized();
+ intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+ tmp
+ }
+
+ pub unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+ struct Block(u64, u64, u64, u64);
+ struct UnalignedBlock(u64, u64, u64, u64);
+
+ let block_size = mem::size_of::<Block>();
+
+ // Loop through x & y, copying them `Block` at a time
+ // The optimizer should unroll the loop fully for most types
+ // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
+ let mut i: usize = 0;
+ while i + block_size <= len {
+ // Create some uninitialized memory as scratch space
+ // Declaring `t` here avoids aligning the stack when this loop is unused
+ let mut t: Block = mem::uninitialized();
+ let t = &mut t as *mut _ as *mut u8;
+ let x = x.offset(i as isize);
+ let y = y.offset(i as isize);
+
+ // Swap a block of bytes of x & y, using t as a temporary buffer
+ // This should be optimized into efficient SIMD operations where available
+ intrinsics::copy_nonoverlapping(x, t, block_size);
+ intrinsics::copy_nonoverlapping(y, x, block_size);
+ intrinsics::copy_nonoverlapping(t, y, block_size);
+ i += block_size;
+ }
+
+ if i < len {
+ // Swap any remaining bytes
+ let mut t: UnalignedBlock = mem::uninitialized();
+ let rem = len - i;
+
+ let t = &mut t as *mut _ as *mut u8;
+ let x = x.offset(i as isize);
+ let y = y.offset(i as isize);
+
+ intrinsics::copy_nonoverlapping(x, t, rem);
+ intrinsics::copy_nonoverlapping(y, x, rem);
+ intrinsics::copy_nonoverlapping(t, y, rem);
+ }
+ }
+}
+
+mod mem {
+ extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "const_transmute", since = "1.46.0")]
+ pub fn transmute<T, U>(_: T) -> U;
+ #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
+ pub fn size_of<T>() -> usize;
+ }
+
+ pub fn swap<T>(x: &mut T, y: &mut T) {
+ unsafe {
+ ptr::swap_nonoverlapping_one(x, y);
+ }
+ }
+
+ pub fn replace<T>(dest: &mut T, mut src: T) -> T {
+ swap(dest, &mut src);
+ src
+ }
+
+ pub unsafe fn uninitialized<T>() -> T {
+ intrinsics::uninit()
+ }
+}
+
+macro_rules! impl_uint {
+ ($($ty:ident = $lang:literal),*) => {
+ $(
+ impl $ty {
+ pub fn wrapping_add(self, rhs: Self) -> Self {
+ unsafe {
+ intrinsics::wrapping_add(self, rhs)
+ }
+ }
+
+ pub fn wrapping_sub(self, rhs: Self) -> Self {
+ unsafe {
+ intrinsics::wrapping_sub(self, rhs)
+ }
+ }
+
+ pub fn rotate_left(self, n: u32) -> Self {
+ unsafe {
+ intrinsics::rotate_left(self, n as Self)
+ }
+ }
+
+ pub fn rotate_right(self, n: u32) -> Self {
+ unsafe {
+ intrinsics::rotate_right(self, n as Self)
+ }
+ }
+
+ pub fn to_le(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ }
+
+ pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ Self::from_le(Self::from_ne_bytes(bytes))
+ }
+
+ pub const fn from_le(x: Self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ x
+ }
+ }
+
+ pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ unsafe { mem::transmute(bytes) }
+ }
+
+ pub fn checked_add(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_add(rhs);
+ if b {
+ Option::None
+ } else {
+ Option::Some(a)
+ }
+ }
+
+ pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+ let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+ (a as Self, b)
+ }
+ }
+ )*
+ }
+}
+
+impl_uint!(
+ u8 = "u8",
+ u16 = "u16",
+ u32 = "u32",
+ u64 = "u64",
+ usize = "usize"
+);
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! add_impl {
+ ($($t:ty)*) => ($(
+ impl Add for $t {
+ type Output = $t;
+
+ fn add(self, other: $t) -> $t { self + other }
+ }
+ )*)
+}
+
+add_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! sub_impl {
+ ($($t:ty)*) => ($(
+ impl Sub for $t {
+ type Output = $t;
+
+ fn sub(self, other: $t) -> $t { self - other }
+ }
+ )*)
+}
+
+sub_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "Range"]
+pub struct Range<Idx> {
+ pub start: Idx,
+ pub end: Idx,
+}
+
+pub trait TryFrom<T>: Sized {
+ /// The type returned in the event of a conversion error.
+ type Error;
+
+ /// Performs the conversion.
+ fn try_from(value: T) -> Result<Self, Self::Error>;
+}
+
+pub trait From<T>: Sized {
+ fn from(_: T) -> Self;
+}
+
+impl<T> From<T> for T {
+ fn from(t: T) -> T {
+ t
+ }
+}
+
+impl<T, U> TryFrom<U> for T
+where
+ T: From<U>,
+{
+ type Error = !;
+
+ fn try_from(value: U) -> Result<Self, Self::Error> {
+ Ok(T::from(value))
+ }
+}
+
+trait Step {
+ /// Returns the number of steps between two step objects. The count is
+ /// inclusive of `start` and exclusive of `end`.
+ ///
+ /// Returns `None` if it is not possible to calculate `steps_between`
+ /// without overflow.
+ fn steps_between(start: &Self, end: &Self) -> Option<usize>;
+
+ /// Replaces this step with `1`, returning itself
+ fn replace_one(&mut self) -> Self;
+
+ /// Replaces this step with `0`, returning itself
+ fn replace_zero(&mut self) -> Self;
+
+ /// Adds one to this step, returning the result
+ fn add_one(&self) -> Self;
+
+    /// Subtracts one from this step, returning the result
+ fn sub_one(&self) -> Self;
+
+    /// Adds a usize, returning None on overflow
+ fn add_usize(&self, n: usize) -> Option<Self>;
+}
+
+// These are still macro-generated because the integer literals resolve to different types.
+macro_rules! step_identical_methods {
+ () => {
+ #[inline]
+ fn replace_one(&mut self) -> Self {
+ mem::replace(self, 1)
+ }
+
+ #[inline]
+ fn replace_zero(&mut self) -> Self {
+ mem::replace(self, 0)
+ }
+
+ #[inline]
+ fn add_one(&self) -> Self {
+ Add::add(*self, 1)
+ }
+
+ #[inline]
+ fn sub_one(&self) -> Self {
+ Sub::sub(*self, 1)
+ }
+ };
+}
+
+macro_rules! step_impl_unsigned {
+ ($($t:ty)*) => ($(
+ impl Step for $t {
+ fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+ if *start < *end {
+ // Note: We assume $t <= usize here
+ Option::Some((*end - *start) as usize)
+ } else {
+ Option::Some(0)
+ }
+ }
+
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ match <$t>::try_from(n) {
+ Result::Ok(n_as_t) => self.checked_add(n_as_t),
+ Result::Err(_) => Option::None,
+ }
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+macro_rules! step_impl_signed {
+ ($( [$t:ty : $unsigned:ty] )*) => ($(
+ impl Step for $t {
+ #[inline]
+ #[allow(trivial_numeric_casts)]
+ fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+ if *start < *end {
+ // Note: We assume $t <= isize here
+ // Use .wrapping_sub and cast to usize to compute the
+ // difference that may not fit inside the range of isize.
+ Option::Some((*end as isize).wrapping_sub(*start as isize) as usize)
+ } else {
+ Option::Some(0)
+ }
+ }
+
+ #[inline]
+ #[allow(unreachable_patterns)]
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ match <$unsigned>::try_from(n) {
+ Result::Ok(n_as_unsigned) => {
+ // Wrapping in unsigned space handles cases like
+ // `-120_i8.add_usize(200) == Option::Some(80_i8)`,
+ // even though 200_usize is out of range for i8.
+ let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t;
+ if wrapped >= *self {
+ Option::Some(wrapped)
+ } else {
+ Option::None // Addition overflowed
+ }
+ }
+ Result::Err(_) => Option::None,
+ }
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+
+macro_rules! step_impl_no_between {
+ ($($t:ty)*) => ($(
+ impl Step for $t {
+ #[inline]
+ fn steps_between(_start: &Self, _end: &Self) -> Option<usize> {
+ Option::None
+ }
+
+ #[inline]
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ self.checked_add(n as $t)
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+
+step_impl_unsigned!(usize);
+
+pub trait Iterator {
+ type Item;
+
+ #[lang = "next"]
+ fn next(&mut self) -> Option<Self::Item>;
+}
+
+impl<A: Step> Iterator for Range<A> {
+ type Item = A;
+
+ fn next(&mut self) -> Option<A> {
+ if self.start < self.end {
+ // We check for overflow here, even though it can't actually
+ // happen. Adding this check does however help llvm vectorize loops
+ // for some ranges that don't get vectorized otherwise,
+ // and this won't actually result in an extra check in an optimized build.
+ match self.start.add_usize(1) {
+ Option::Some(mut n) => {
+ mem::swap(&mut n, &mut self.start);
+ Option::Some(n)
+ }
+ Option::None => Option::None,
+ }
+ } else {
+ Option::None
+ }
+ }
+}
+
+pub trait IntoIterator {
+ type Item;
+
+ type IntoIter: Iterator<Item = Self::Item>;
+
+ #[lang = "into_iter"]
+ fn into_iter(self) -> Self::IntoIter;
+}
+
+impl<I: Iterator> IntoIterator for I {
+ type Item = I::Item;
+ type IntoIter = I;
+
+ fn into_iter(self) -> I {
+ self
+ }
+}
+
+pub fn main() {
+ let a = 1usize..3usize;
+
+ for i in a { // { dg-warning "unused name" }
+ unsafe { puts("loop\0" as *const str as *const i8); }
+ }
+}
diff --git a/gcc/testsuite/rust/compile/for-loop2.rs b/gcc/testsuite/rust/compile/for-loop2.rs
new file mode 100644
index 0000000..d18bddd
--- /dev/null
+++ b/gcc/testsuite/rust/compile/for-loop2.rs
@@ -0,0 +1,545 @@
+// { dg-output "1\r*\n2\r*\n" }
+#![feature(intrinsics)]
+
+pub use option::Option::{self, None, Some};
+pub use result::Result::{self, Err, Ok};
+
+extern "C" {
+ fn printf(s: *const i8, ...);
+ fn puts(s: *const i8);
+}
+
+mod option {
+ pub enum Option<T> {
+ #[lang = "None"]
+ None,
+ #[lang = "Some"]
+ Some(T),
+ }
+}
+
+mod result {
+ enum Result<T, E> {
+ Ok(T),
+ Err(E),
+ }
+}
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "clone"]
+pub trait Clone: Sized {
+ fn clone(&self) -> Self;
+
+ fn clone_from(&mut self, source: &Self) {
+ *self = source.clone()
+ }
+}
+
+mod impls {
+ use super::Clone;
+
+ macro_rules! impl_clone {
+ ($($t:ty)*) => {
+ $(
+ impl Clone for $t {
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+ )*
+ }
+ }
+
+ impl_clone! {
+ usize u8 u16 u32 u64 // u128
+ isize i8 i16 i32 i64 // i128
+ f32 f64
+ bool char
+ }
+}
+
+#[lang = "copy"]
+pub trait Copy: Clone {
+ // Empty.
+}
+
+mod copy_impls {
+ use super::Copy;
+
+ macro_rules! impl_copy {
+ ($($t:ty)*) => {
+ $(
+ impl Copy for $t {}
+ )*
+ }
+ }
+
+ impl_copy! {
+ usize u8 u16 u32 u64 // u128
+ isize i8 i16 i32 i64 // i128
+ f32 f64
+ bool char
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);
+ pub fn wrapping_add<T>(a: T, b: T) -> T;
+ pub fn wrapping_sub<T>(a: T, b: T) -> T;
+ pub fn rotate_left<T>(a: T, b: T) -> T;
+ pub fn rotate_right<T>(a: T, b: T) -> T;
+ pub fn offset<T>(ptr: *const T, count: isize) -> *const T;
+ pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ pub fn move_val_init<T>(dst: *mut T, src: T);
+ pub fn uninit<T>() -> T;
+ }
+}
+
+mod ptr {
+ #[lang = "const_ptr"]
+ impl<T> *const T {
+ pub unsafe fn offset(self, count: isize) -> *const T {
+ intrinsics::offset(self, count)
+ }
+ }
+
+ #[lang = "mut_ptr"]
+ impl<T> *mut T {
+ pub unsafe fn offset(self, count: isize) -> *mut T {
+ intrinsics::offset(self, count) as *mut T
+ }
+ }
+
+ pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
+ let x = x as *mut u8;
+ let y = y as *mut u8;
+ let len = mem::size_of::<T>() * count;
+ swap_nonoverlapping_bytes(x, y, len)
+ }
+
+ pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+ // For types smaller than the block optimization below,
+ // just swap directly to avoid pessimizing codegen.
+ if mem::size_of::<T>() < 32 {
+ let z = read(x);
+ intrinsics::copy_nonoverlapping(y, x, 1);
+ write(y, z);
+ } else {
+ swap_nonoverlapping(x, y, 1);
+ }
+ }
+
+ pub unsafe fn write<T>(dst: *mut T, src: T) {
+ intrinsics::move_val_init(&mut *dst, src)
+ }
+
+ pub unsafe fn read<T>(src: *const T) -> T {
+ let mut tmp: T = mem::uninitialized();
+ intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+ tmp
+ }
+
+ pub unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+ struct Block(u64, u64, u64, u64);
+ struct UnalignedBlock(u64, u64, u64, u64);
+
+ let block_size = mem::size_of::<Block>();
+
+ // Loop through x & y, copying them `Block` at a time
+ // The optimizer should unroll the loop fully for most types
+ // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
+ let mut i: usize = 0;
+ while i + block_size <= len {
+ // Create some uninitialized memory as scratch space
+ // Declaring `t` here avoids aligning the stack when this loop is unused
+ let mut t: Block = mem::uninitialized();
+ let t = &mut t as *mut _ as *mut u8;
+ let x = x.offset(i as isize);
+ let y = y.offset(i as isize);
+
+ // Swap a block of bytes of x & y, using t as a temporary buffer
+ // This should be optimized into efficient SIMD operations where available
+ intrinsics::copy_nonoverlapping(x, t, block_size);
+ intrinsics::copy_nonoverlapping(y, x, block_size);
+ intrinsics::copy_nonoverlapping(t, y, block_size);
+ i += block_size;
+ }
+
+ if i < len {
+ // Swap any remaining bytes
+ let mut t: UnalignedBlock = mem::uninitialized();
+ let rem = len - i;
+
+ let t = &mut t as *mut _ as *mut u8;
+ let x = x.offset(i as isize);
+ let y = y.offset(i as isize);
+
+ intrinsics::copy_nonoverlapping(x, t, rem);
+ intrinsics::copy_nonoverlapping(y, x, rem);
+ intrinsics::copy_nonoverlapping(t, y, rem);
+ }
+ }
+}
+
+mod mem {
+ extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "const_transmute", since = "1.46.0")]
+ pub fn transmute<T, U>(_: T) -> U;
+ #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
+ pub fn size_of<T>() -> usize;
+ }
+
+ pub fn swap<T>(x: &mut T, y: &mut T) {
+ unsafe {
+ ptr::swap_nonoverlapping_one(x, y);
+ }
+ }
+
+ pub fn replace<T>(dest: &mut T, mut src: T) -> T {
+ swap(dest, &mut src);
+ src
+ }
+
+ pub unsafe fn uninitialized<T>() -> T {
+ intrinsics::uninit()
+ }
+}
+
+macro_rules! impl_uint {
+ ($($ty:ident = $lang:literal),*) => {
+ $(
+ impl $ty {
+ pub fn wrapping_add(self, rhs: Self) -> Self {
+ unsafe {
+ intrinsics::wrapping_add(self, rhs)
+ }
+ }
+
+ pub fn wrapping_sub(self, rhs: Self) -> Self {
+ unsafe {
+ intrinsics::wrapping_sub(self, rhs)
+ }
+ }
+
+ pub fn rotate_left(self, n: u32) -> Self {
+ unsafe {
+ intrinsics::rotate_left(self, n as Self)
+ }
+ }
+
+ pub fn rotate_right(self, n: u32) -> Self {
+ unsafe {
+ intrinsics::rotate_right(self, n as Self)
+ }
+ }
+
+ pub fn to_le(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ }
+
+ pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ Self::from_le(Self::from_ne_bytes(bytes))
+ }
+
+ pub const fn from_le(x: Self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ x
+ }
+ }
+
+ pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ unsafe { mem::transmute(bytes) }
+ }
+
+ pub fn checked_add(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_add(rhs);
+ if b {
+ Option::None
+ } else {
+ Option::Some(a)
+ }
+ }
+
+ pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+ let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+ (a as Self, b)
+ }
+ }
+ )*
+ }
+}
+
+impl_uint!(
+ u8 = "u8",
+ u16 = "u16",
+ u32 = "u32",
+ u64 = "u64",
+ usize = "usize"
+);
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! add_impl {
+ ($($t:ty)*) => ($(
+ impl Add for $t {
+ type Output = $t;
+
+ fn add(self, other: $t) -> $t { self + other }
+ }
+ )*)
+}
+
+add_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! sub_impl {
+ ($($t:ty)*) => ($(
+ impl Sub for $t {
+ type Output = $t;
+
+ fn sub(self, other: $t) -> $t { self - other }
+ }
+ )*)
+}
+
+sub_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "Range"]
+pub struct Range<Idx> {
+ pub start: Idx,
+ pub end: Idx,
+}
+
+pub trait TryFrom<T>: Sized {
+ /// The type returned in the event of a conversion error.
+ type Error;
+
+ /// Performs the conversion.
+ fn try_from(value: T) -> Result<Self, Self::Error>;
+}
+
+pub trait From<T>: Sized {
+ fn from(_: T) -> Self;
+}
+
+impl<T> From<T> for T {
+ fn from(t: T) -> T {
+ t
+ }
+}
+
+impl<T, U> TryFrom<U> for T
+where
+ T: From<U>,
+{
+ type Error = !;
+
+ fn try_from(value: U) -> Result<Self, Self::Error> {
+ Ok(T::from(value))
+ }
+}
+
+trait Step {
+ /// Returns the number of steps between two step objects. The count is
+ /// inclusive of `start` and exclusive of `end`.
+ ///
+ /// Returns `None` if it is not possible to calculate `steps_between`
+ /// without overflow.
+ fn steps_between(start: &Self, end: &Self) -> Option<usize>;
+
+ /// Replaces this step with `1`, returning itself
+ fn replace_one(&mut self) -> Self;
+
+ /// Replaces this step with `0`, returning itself
+ fn replace_zero(&mut self) -> Self;
+
+ /// Adds one to this step, returning the result
+ fn add_one(&self) -> Self;
+
+    /// Subtracts one from this step, returning the result
+ fn sub_one(&self) -> Self;
+
+    /// Adds a usize, returning None on overflow
+ fn add_usize(&self, n: usize) -> Option<Self>;
+}
+
+// These are still macro-generated because the integer literals resolve to different types.
+macro_rules! step_identical_methods {
+ () => {
+ #[inline]
+ fn replace_one(&mut self) -> Self {
+ mem::replace(self, 1)
+ }
+
+ #[inline]
+ fn replace_zero(&mut self) -> Self {
+ mem::replace(self, 0)
+ }
+
+ #[inline]
+ fn add_one(&self) -> Self {
+ Add::add(*self, 1)
+ }
+
+ #[inline]
+ fn sub_one(&self) -> Self {
+ Sub::sub(*self, 1)
+ }
+ };
+}
+
+macro_rules! step_impl_unsigned {
+ ($($t:ty)*) => ($(
+ impl Step for $t {
+ fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+ if *start < *end {
+ // Note: We assume $t <= usize here
+ Option::Some((*end - *start) as usize)
+ } else {
+ Option::Some(0)
+ }
+ }
+
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ match <$t>::try_from(n) {
+ Result::Ok(n_as_t) => self.checked_add(n_as_t),
+ Result::Err(_) => Option::None,
+ }
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+macro_rules! step_impl_signed {
+ ($( [$t:ty : $unsigned:ty] )*) => ($(
+ impl Step for $t {
+ #[inline]
+ #[allow(trivial_numeric_casts)]
+ fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+ if *start < *end {
+ // Note: We assume $t <= isize here
+ // Use .wrapping_sub and cast to usize to compute the
+ // difference that may not fit inside the range of isize.
+ Option::Some((*end as isize).wrapping_sub(*start as isize) as usize)
+ } else {
+ Option::Some(0)
+ }
+ }
+
+ #[inline]
+ #[allow(unreachable_patterns)]
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ match <$unsigned>::try_from(n) {
+ Result::Ok(n_as_unsigned) => {
+ // Wrapping in unsigned space handles cases like
+ // `-120_i8.add_usize(200) == Option::Some(80_i8)`,
+ // even though 200_usize is out of range for i8.
+ let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t;
+ if wrapped >= *self {
+ Option::Some(wrapped)
+ } else {
+ Option::None // Addition overflowed
+ }
+ }
+ Result::Err(_) => Option::None,
+ }
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+
+macro_rules! step_impl_no_between {
+ ($($t:ty)*) => ($(
+ impl Step for $t {
+ #[inline]
+ fn steps_between(_start: &Self, _end: &Self) -> Option<usize> {
+ Option::None
+ }
+
+ #[inline]
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ self.checked_add(n as $t)
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+
+step_impl_unsigned!(usize);
+
+pub trait Iterator {
+ type Item;
+
+ #[lang = "next"]
+ fn next(&mut self) -> Option<Self::Item>;
+}
+
+impl<A: Step> Iterator for Range<A> {
+ type Item = A;
+
+ fn next(&mut self) -> Option<A> {
+ if self.start < self.end {
+ // We check for overflow here, even though it can't actually
+ // happen. Adding this check does however help llvm vectorize loops
+ // for some ranges that don't get vectorized otherwise,
+ // and this won't actually result in an extra check in an optimized build.
+ match self.start.add_usize(1) {
+ Option::Some(mut n) => {
+ mem::swap(&mut n, &mut self.start);
+ Option::Some(n)
+ }
+ Option::None => Option::None,
+ }
+ } else {
+ Option::None
+ }
+ }
+}
+
+pub trait IntoIterator {
+ type Item;
+
+ type IntoIter: Iterator<Item = Self::Item>;
+
+ #[lang = "into_iter"]
+ fn into_iter(self) -> Self::IntoIter;
+}
+
+impl<I: Iterator> IntoIterator for I {
+ type Item = I::Item;
+ type IntoIter = I;
+
+ fn into_iter(self) -> I {
+ self
+ }
+}
+
+pub fn main() {
+ // make sure we can desugar for-loops inside other blocks
+
+ if true {
+ for _ in 20usize..40usize {
+ unsafe { puts("loop\0" as *const str as *const i8); }
+ }
+ }
+}
diff --git a/gcc/testsuite/rust/compile/nr2/exclude b/gcc/testsuite/rust/compile/nr2/exclude
index 2d9cb76..5124194 100644
--- a/gcc/testsuite/rust/compile/nr2/exclude
+++ b/gcc/testsuite/rust/compile/nr2/exclude
@@ -121,4 +121,6 @@ derive-debug1.rs
issue-3382.rs
derive-default1.rs
issue-3402-1.rs
+for-loop1.rs
+for-loop2.rs
# please don't delete the trailing newline
diff --git a/gcc/testsuite/rust/execute/torture/for-loop1.rs b/gcc/testsuite/rust/execute/torture/for-loop1.rs
new file mode 100644
index 0000000..5a6a70c
--- /dev/null
+++ b/gcc/testsuite/rust/execute/torture/for-loop1.rs
@@ -0,0 +1,545 @@
+// { dg-output "loop\r*\nloop\r*\n" }
+#![feature(intrinsics)]
+
+pub use option::Option::{self, None, Some};
+pub use result::Result::{self, Err, Ok};
+
+extern "C" {
+ fn printf(s: *const i8, ...);
+ fn puts(s: *const i8);
+}
+
+mod option {
+ pub enum Option<T> {
+ #[lang = "None"]
+ None,
+ #[lang = "Some"]
+ Some(T),
+ }
+}
+
+mod result {
+ enum Result<T, E> {
+ Ok(T),
+ Err(E),
+ }
+}
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "clone"]
+pub trait Clone: Sized {
+ fn clone(&self) -> Self;
+
+ fn clone_from(&mut self, source: &Self) {
+ *self = source.clone()
+ }
+}
+
+mod impls {
+ use super::Clone;
+
+ macro_rules! impl_clone {
+ ($($t:ty)*) => {
+ $(
+ impl Clone for $t {
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+ )*
+ }
+ }
+
+ impl_clone! {
+ usize u8 u16 u32 u64 // u128
+ isize i8 i16 i32 i64 // i128
+ f32 f64
+ bool char
+ }
+}
+
+#[lang = "copy"]
+pub trait Copy: Clone {
+ // Empty.
+}
+
+mod copy_impls {
+ use super::Copy;
+
+ macro_rules! impl_copy {
+ ($($t:ty)*) => {
+ $(
+ impl Copy for $t {}
+ )*
+ }
+ }
+
+ impl_copy! {
+ usize u8 u16 u32 u64 // u128
+ isize i8 i16 i32 i64 // i128
+ f32 f64
+ bool char
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);
+ pub fn wrapping_add<T>(a: T, b: T) -> T;
+ pub fn wrapping_sub<T>(a: T, b: T) -> T;
+ pub fn rotate_left<T>(a: T, b: T) -> T;
+ pub fn rotate_right<T>(a: T, b: T) -> T;
+ pub fn offset<T>(ptr: *const T, count: isize) -> *const T;
+ pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ pub fn move_val_init<T>(dst: *mut T, src: T);
+ pub fn uninit<T>() -> T;
+ }
+}
+
+mod ptr {
+ #[lang = "const_ptr"]
+ impl<T> *const T {
+ pub unsafe fn offset(self, count: isize) -> *const T {
+ intrinsics::offset(self, count)
+ }
+ }
+
+ #[lang = "mut_ptr"]
+ impl<T> *mut T {
+ pub unsafe fn offset(self, count: isize) -> *mut T {
+ intrinsics::offset(self, count) as *mut T
+ }
+ }
+
+ pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
+ let x = x as *mut u8;
+ let y = y as *mut u8;
+ let len = mem::size_of::<T>() * count;
+ swap_nonoverlapping_bytes(x, y, len)
+ }
+
+ pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+ // For types smaller than the block optimization below,
+ // just swap directly to avoid pessimizing codegen.
+ if mem::size_of::<T>() < 32 {
+ let z = read(x);
+ intrinsics::copy_nonoverlapping(y, x, 1);
+ write(y, z);
+ } else {
+ swap_nonoverlapping(x, y, 1);
+ }
+ }
+
+ pub unsafe fn write<T>(dst: *mut T, src: T) {
+ intrinsics::move_val_init(&mut *dst, src)
+ }
+
+ pub unsafe fn read<T>(src: *const T) -> T {
+ let mut tmp: T = mem::uninitialized();
+ intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+ tmp
+ }
+
+ pub unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+ struct Block(u64, u64, u64, u64);
+ struct UnalignedBlock(u64, u64, u64, u64);
+
+ let block_size = mem::size_of::<Block>();
+
+ // Loop through x & y, copying them `Block` at a time
+ // The optimizer should unroll the loop fully for most types
+ // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
+ let mut i: usize = 0;
+ while i + block_size <= len {
+ // Create some uninitialized memory as scratch space
+ // Declaring `t` here avoids aligning the stack when this loop is unused
+ let mut t: Block = mem::uninitialized();
+ let t = &mut t as *mut _ as *mut u8;
+ let x = x.offset(i as isize);
+ let y = y.offset(i as isize);
+
+ // Swap a block of bytes of x & y, using t as a temporary buffer
+ // This should be optimized into efficient SIMD operations where available
+ intrinsics::copy_nonoverlapping(x, t, block_size);
+ intrinsics::copy_nonoverlapping(y, x, block_size);
+ intrinsics::copy_nonoverlapping(t, y, block_size);
+ i += block_size;
+ }
+
+ if i < len {
+ // Swap any remaining bytes
+ let mut t: UnalignedBlock = mem::uninitialized();
+ let rem = len - i;
+
+ let t = &mut t as *mut _ as *mut u8;
+ let x = x.offset(i as isize);
+ let y = y.offset(i as isize);
+
+ intrinsics::copy_nonoverlapping(x, t, rem);
+ intrinsics::copy_nonoverlapping(y, x, rem);
+ intrinsics::copy_nonoverlapping(t, y, rem);
+ }
+ }
+}
+
+mod mem {
+ extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "const_transmute", since = "1.46.0")]
+ pub fn transmute<T, U>(_: T) -> U;
+ #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
+ pub fn size_of<T>() -> usize;
+ }
+
+ pub fn swap<T>(x: &mut T, y: &mut T) {
+ unsafe {
+ ptr::swap_nonoverlapping_one(x, y);
+ }
+ }
+
+ pub fn replace<T>(dest: &mut T, mut src: T) -> T {
+ swap(dest, &mut src);
+ src
+ }
+
+ pub unsafe fn uninitialized<T>() -> T {
+ intrinsics::uninit()
+ }
+}
+
+macro_rules! impl_uint {
+ ($($ty:ident = $lang:literal),*) => {
+ $(
+ impl $ty {
+ pub fn wrapping_add(self, rhs: Self) -> Self {
+ unsafe {
+ intrinsics::wrapping_add(self, rhs)
+ }
+ }
+
+ pub fn wrapping_sub(self, rhs: Self) -> Self {
+ unsafe {
+ intrinsics::wrapping_sub(self, rhs)
+ }
+ }
+
+ pub fn rotate_left(self, n: u32) -> Self {
+ unsafe {
+ intrinsics::rotate_left(self, n as Self)
+ }
+ }
+
+ pub fn rotate_right(self, n: u32) -> Self {
+ unsafe {
+ intrinsics::rotate_right(self, n as Self)
+ }
+ }
+
+ pub fn to_le(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ }
+
+ pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ Self::from_le(Self::from_ne_bytes(bytes))
+ }
+
+ pub const fn from_le(x: Self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ x
+ }
+ }
+
+ pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ unsafe { mem::transmute(bytes) }
+ }
+
+ pub fn checked_add(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_add(rhs);
+ if b {
+ Option::None
+ } else {
+ Option::Some(a)
+ }
+ }
+
+ pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+ let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+ (a as Self, b)
+ }
+ }
+ )*
+ }
+}
+
+impl_uint!(
+ u8 = "u8",
+ u16 = "u16",
+ u32 = "u32",
+ u64 = "u64",
+ usize = "usize"
+);
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! add_impl {
+ ($($t:ty)*) => ($(
+ impl Add for $t {
+ type Output = $t;
+
+ fn add(self, other: $t) -> $t { self + other }
+ }
+ )*)
+}
+
+add_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! sub_impl {
+ ($($t:ty)*) => ($(
+ impl Sub for $t {
+ type Output = $t;
+
+ fn sub(self, other: $t) -> $t { self - other }
+ }
+ )*)
+}
+
+sub_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "Range"]
+pub struct Range<Idx> {
+ pub start: Idx,
+ pub end: Idx,
+}
+
+pub trait TryFrom<T>: Sized {
+ /// The type returned in the event of a conversion error.
+ type Error;
+
+ /// Performs the conversion.
+ fn try_from(value: T) -> Result<Self, Self::Error>;
+}
+
+pub trait From<T>: Sized {
+ fn from(_: T) -> Self;
+}
+
+impl<T> From<T> for T {
+ fn from(t: T) -> T {
+ t
+ }
+}
+
+impl<T, U> TryFrom<U> for T
+where
+ T: From<U>,
+{
+ type Error = !;
+
+ fn try_from(value: U) -> Result<Self, Self::Error> {
+ Ok(T::from(value))
+ }
+}
+
+trait Step {
+ /// Returns the number of steps between two step objects. The count is
+ /// inclusive of `start` and exclusive of `end`.
+ ///
+ /// Returns `None` if it is not possible to calculate `steps_between`
+ /// without overflow.
+ fn steps_between(start: &Self, end: &Self) -> Option<usize>;
+
+ /// Replaces this step with `1`, returning itself
+ fn replace_one(&mut self) -> Self;
+
+ /// Replaces this step with `0`, returning itself
+ fn replace_zero(&mut self) -> Self;
+
+ /// Adds one to this step, returning the result
+ fn add_one(&self) -> Self;
+
+    /// Subtracts one from this step, returning the result
+ fn sub_one(&self) -> Self;
+
+    /// Adds a usize, returning None on overflow
+ fn add_usize(&self, n: usize) -> Option<Self>;
+}
+
+// These are still macro-generated because the integer literals resolve to different types.
+macro_rules! step_identical_methods {
+ () => {
+ #[inline]
+ fn replace_one(&mut self) -> Self {
+ mem::replace(self, 1)
+ }
+
+ #[inline]
+ fn replace_zero(&mut self) -> Self {
+ mem::replace(self, 0)
+ }
+
+ #[inline]
+ fn add_one(&self) -> Self {
+ Add::add(*self, 1)
+ }
+
+ #[inline]
+ fn sub_one(&self) -> Self {
+ Sub::sub(*self, 1)
+ }
+ };
+}
+
+macro_rules! step_impl_unsigned {
+ ($($t:ty)*) => ($(
+ impl Step for $t {
+ fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+ if *start < *end {
+ // Note: We assume $t <= usize here
+ Option::Some((*end - *start) as usize)
+ } else {
+ Option::Some(0)
+ }
+ }
+
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ match <$t>::try_from(n) {
+ Result::Ok(n_as_t) => self.checked_add(n_as_t),
+ Result::Err(_) => Option::None,
+ }
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+macro_rules! step_impl_signed {
+ ($( [$t:ty : $unsigned:ty] )*) => ($(
+ impl Step for $t {
+ #[inline]
+ #[allow(trivial_numeric_casts)]
+ fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+ if *start < *end {
+ // Note: We assume $t <= isize here
+ // Use .wrapping_sub and cast to usize to compute the
+ // difference that may not fit inside the range of isize.
+ Option::Some((*end as isize).wrapping_sub(*start as isize) as usize)
+ } else {
+ Option::Some(0)
+ }
+ }
+
+ #[inline]
+ #[allow(unreachable_patterns)]
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ match <$unsigned>::try_from(n) {
+ Result::Ok(n_as_unsigned) => {
+ // Wrapping in unsigned space handles cases like
+ // `-120_i8.add_usize(200) == Option::Some(80_i8)`,
+ // even though 200_usize is out of range for i8.
+ let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t;
+ if wrapped >= *self {
+ Option::Some(wrapped)
+ } else {
+ Option::None // Addition overflowed
+ }
+ }
+ Result::Err(_) => Option::None,
+ }
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+
+macro_rules! step_impl_no_between {
+ ($($t:ty)*) => ($(
+ impl Step for $t {
+ #[inline]
+ fn steps_between(_start: &Self, _end: &Self) -> Option<usize> {
+ Option::None
+ }
+
+ #[inline]
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ self.checked_add(n as $t)
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+
+step_impl_unsigned!(usize);
+
+pub trait Iterator {
+ type Item;
+
+ #[lang = "next"]
+ fn next(&mut self) -> Option<Self::Item>;
+}
+
+impl<A: Step> Iterator for Range<A> {
+ type Item = A;
+
+ fn next(&mut self) -> Option<A> {
+ if self.start < self.end {
+ // We check for overflow here, even though it can't actually
+ // happen. Adding this check does however help llvm vectorize loops
+ // for some ranges that don't get vectorized otherwise,
+ // and this won't actually result in an extra check in an optimized build.
+ match self.start.add_usize(1) {
+ Option::Some(mut n) => {
+ mem::swap(&mut n, &mut self.start);
+ Option::Some(n)
+ }
+ Option::None => Option::None,
+ }
+ } else {
+ Option::None
+ }
+ }
+}
+
+pub trait IntoIterator {
+ type Item;
+
+ type IntoIter: Iterator<Item = Self::Item>;
+
+ #[lang = "into_iter"]
+ fn into_iter(self) -> Self::IntoIter;
+}
+
+impl<I: Iterator> IntoIterator for I {
+ type Item = I::Item;
+ type IntoIter = I;
+
+ fn into_iter(self) -> I {
+ self
+ }
+}
+
+pub fn main() -> i32 {
+ let a = 1usize..3usize;
+
+ for i in a { // { dg-warning "unused name" }
+ unsafe { puts("loop\0" as *const str as *const i8); }
+ }
+
+ 0
+}
diff --git a/gcc/testsuite/rust/execute/torture/for-loop2.rs b/gcc/testsuite/rust/execute/torture/for-loop2.rs
new file mode 100644
index 0000000..5ba2cd1
--- /dev/null
+++ b/gcc/testsuite/rust/execute/torture/for-loop2.rs
@@ -0,0 +1,544 @@
+// { dg-output "loop1\r*\nloop2\r*\n" }
+#![feature(intrinsics)]
+
+pub use option::Option::{self, None, Some};
+pub use result::Result::{self, Err, Ok};
+
+extern "C" {
+ fn printf(s: *const i8, ...);
+}
+
+mod option {
+ pub enum Option<T> {
+ #[lang = "None"]
+ None,
+ #[lang = "Some"]
+ Some(T),
+ }
+}
+
+mod result {
+ enum Result<T, E> {
+ Ok(T),
+ Err(E),
+ }
+}
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "clone"]
+pub trait Clone: Sized {
+ fn clone(&self) -> Self;
+
+ fn clone_from(&mut self, source: &Self) {
+ *self = source.clone()
+ }
+}
+
+mod impls {
+ use super::Clone;
+
+ macro_rules! impl_clone {
+ ($($t:ty)*) => {
+ $(
+ impl Clone for $t {
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+ )*
+ }
+ }
+
+ impl_clone! {
+ usize u8 u16 u32 u64 // u128
+ isize i8 i16 i32 i64 // i128
+ f32 f64
+ bool char
+ }
+}
+
+#[lang = "copy"]
+pub trait Copy: Clone {
+ // Empty.
+}
+
+mod copy_impls {
+ use super::Copy;
+
+ macro_rules! impl_copy {
+ ($($t:ty)*) => {
+ $(
+ impl Copy for $t {}
+ )*
+ }
+ }
+
+ impl_copy! {
+ usize u8 u16 u32 u64 // u128
+ isize i8 i16 i32 i64 // i128
+ f32 f64
+ bool char
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);
+ pub fn wrapping_add<T>(a: T, b: T) -> T;
+ pub fn wrapping_sub<T>(a: T, b: T) -> T;
+ pub fn rotate_left<T>(a: T, b: T) -> T;
+ pub fn rotate_right<T>(a: T, b: T) -> T;
+ pub fn offset<T>(ptr: *const T, count: isize) -> *const T;
+ pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ pub fn move_val_init<T>(dst: *mut T, src: T);
+ pub fn uninit<T>() -> T;
+ }
+}
+
+mod ptr {
+ #[lang = "const_ptr"]
+ impl<T> *const T {
+ pub unsafe fn offset(self, count: isize) -> *const T {
+ intrinsics::offset(self, count)
+ }
+ }
+
+ #[lang = "mut_ptr"]
+ impl<T> *mut T {
+ pub unsafe fn offset(self, count: isize) -> *mut T {
+ intrinsics::offset(self, count) as *mut T
+ }
+ }
+
+ pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
+ let x = x as *mut u8;
+ let y = y as *mut u8;
+ let len = mem::size_of::<T>() * count;
+ swap_nonoverlapping_bytes(x, y, len)
+ }
+
+ pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+ // For types smaller than the block optimization below,
+ // just swap directly to avoid pessimizing codegen.
+ if mem::size_of::<T>() < 32 {
+ let z = read(x);
+ intrinsics::copy_nonoverlapping(y, x, 1);
+ write(y, z);
+ } else {
+ swap_nonoverlapping(x, y, 1);
+ }
+ }
+
+ pub unsafe fn write<T>(dst: *mut T, src: T) {
+ intrinsics::move_val_init(&mut *dst, src)
+ }
+
+ pub unsafe fn read<T>(src: *const T) -> T {
+ let mut tmp: T = mem::uninitialized();
+ intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+ tmp
+ }
+
+ pub unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+ struct Block(u64, u64, u64, u64);
+ struct UnalignedBlock(u64, u64, u64, u64);
+
+ let block_size = mem::size_of::<Block>();
+
+ // Loop through x & y, copying them `Block` at a time
+ // The optimizer should unroll the loop fully for most types
+ // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
+ let mut i: usize = 0;
+ while i + block_size <= len {
+ // Create some uninitialized memory as scratch space
+ // Declaring `t` here avoids aligning the stack when this loop is unused
+ let mut t: Block = mem::uninitialized();
+ let t = &mut t as *mut _ as *mut u8;
+ let x = x.offset(i as isize);
+ let y = y.offset(i as isize);
+
+ // Swap a block of bytes of x & y, using t as a temporary buffer
+ // This should be optimized into efficient SIMD operations where available
+ intrinsics::copy_nonoverlapping(x, t, block_size);
+ intrinsics::copy_nonoverlapping(y, x, block_size);
+ intrinsics::copy_nonoverlapping(t, y, block_size);
+ i += block_size;
+ }
+
+ if i < len {
+ // Swap any remaining bytes
+ let mut t: UnalignedBlock = mem::uninitialized();
+ let rem = len - i;
+
+ let t = &mut t as *mut _ as *mut u8;
+ let x = x.offset(i as isize);
+ let y = y.offset(i as isize);
+
+ intrinsics::copy_nonoverlapping(x, t, rem);
+ intrinsics::copy_nonoverlapping(y, x, rem);
+ intrinsics::copy_nonoverlapping(t, y, rem);
+ }
+ }
+}
+
+mod mem {
+ extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "const_transmute", since = "1.46.0")]
+ pub fn transmute<T, U>(_: T) -> U;
+ #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
+ pub fn size_of<T>() -> usize;
+ }
+
+ pub fn swap<T>(x: &mut T, y: &mut T) {
+ unsafe {
+ ptr::swap_nonoverlapping_one(x, y);
+ }
+ }
+
+ pub fn replace<T>(dest: &mut T, mut src: T) -> T {
+ swap(dest, &mut src);
+ src
+ }
+
+ pub unsafe fn uninitialized<T>() -> T {
+ intrinsics::uninit()
+ }
+}
+
+macro_rules! impl_uint {
+ ($($ty:ident = $lang:literal),*) => {
+ $(
+ impl $ty {
+ pub fn wrapping_add(self, rhs: Self) -> Self {
+ unsafe {
+ intrinsics::wrapping_add(self, rhs)
+ }
+ }
+
+ pub fn wrapping_sub(self, rhs: Self) -> Self {
+ unsafe {
+ intrinsics::wrapping_sub(self, rhs)
+ }
+ }
+
+ pub fn rotate_left(self, n: u32) -> Self {
+ unsafe {
+ intrinsics::rotate_left(self, n as Self)
+ }
+ }
+
+ pub fn rotate_right(self, n: u32) -> Self {
+ unsafe {
+ intrinsics::rotate_right(self, n as Self)
+ }
+ }
+
+ pub fn to_le(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ }
+
+ pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ Self::from_le(Self::from_ne_bytes(bytes))
+ }
+
+ pub const fn from_le(x: Self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ x
+ }
+ }
+
+ pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ unsafe { mem::transmute(bytes) }
+ }
+
+ pub fn checked_add(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_add(rhs);
+ if b {
+ Option::None
+ } else {
+ Option::Some(a)
+ }
+ }
+
+ pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+ let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+ (a as Self, b)
+ }
+ }
+ )*
+ }
+}
+
+impl_uint!(
+ u8 = "u8",
+ u16 = "u16",
+ u32 = "u32",
+ u64 = "u64",
+ usize = "usize"
+);
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! add_impl {
+ ($($t:ty)*) => ($(
+ impl Add for $t {
+ type Output = $t;
+
+ fn add(self, other: $t) -> $t { self + other }
+ }
+ )*)
+}
+
+add_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! sub_impl {
+ ($($t:ty)*) => ($(
+ impl Sub for $t {
+ type Output = $t;
+
+ fn sub(self, other: $t) -> $t { self - other }
+ }
+ )*)
+}
+
+sub_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "Range"]
+pub struct Range<Idx> {
+ pub start: Idx,
+ pub end: Idx,
+}
+
+pub trait TryFrom<T>: Sized {
+ /// The type returned in the event of a conversion error.
+ type Error;
+
+ /// Performs the conversion.
+ fn try_from(value: T) -> Result<Self, Self::Error>;
+}
+
+pub trait From<T>: Sized {
+ fn from(_: T) -> Self;
+}
+
+impl<T> From<T> for T {
+ fn from(t: T) -> T {
+ t
+ }
+}
+
+impl<T, U> TryFrom<U> for T
+where
+ T: From<U>,
+{
+ type Error = !;
+
+ fn try_from(value: U) -> Result<Self, Self::Error> {
+ Ok(T::from(value))
+ }
+}
+
+trait Step {
+ /// Returns the number of steps between two step objects. The count is
+ /// inclusive of `start` and exclusive of `end`.
+ ///
+ /// Returns `None` if it is not possible to calculate `steps_between`
+ /// without overflow.
+ fn steps_between(start: &Self, end: &Self) -> Option<usize>;
+
+ /// Replaces this step with `1`, returning itself
+ fn replace_one(&mut self) -> Self;
+
+ /// Replaces this step with `0`, returning itself
+ fn replace_zero(&mut self) -> Self;
+
+ /// Adds one to this step, returning the result
+ fn add_one(&self) -> Self;
+
+    /// Subtracts one from this step, returning the result
+ fn sub_one(&self) -> Self;
+
+    /// Adds a usize, returning None on overflow
+ fn add_usize(&self, n: usize) -> Option<Self>;
+}
+
+// These are still macro-generated because the integer literals resolve to different types.
+macro_rules! step_identical_methods {
+ () => {
+ #[inline]
+ fn replace_one(&mut self) -> Self {
+ mem::replace(self, 1)
+ }
+
+ #[inline]
+ fn replace_zero(&mut self) -> Self {
+ mem::replace(self, 0)
+ }
+
+ #[inline]
+ fn add_one(&self) -> Self {
+ Add::add(*self, 1)
+ }
+
+ #[inline]
+ fn sub_one(&self) -> Self {
+ Sub::sub(*self, 1)
+ }
+ };
+}
+
+macro_rules! step_impl_unsigned {
+ ($($t:ty)*) => ($(
+ impl Step for $t {
+ fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+ if *start < *end {
+ // Note: We assume $t <= usize here
+ Option::Some((*end - *start) as usize)
+ } else {
+ Option::Some(0)
+ }
+ }
+
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ match <$t>::try_from(n) {
+ Result::Ok(n_as_t) => self.checked_add(n_as_t),
+ Result::Err(_) => Option::None,
+ }
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+macro_rules! step_impl_signed {
+ ($( [$t:ty : $unsigned:ty] )*) => ($(
+ impl Step for $t {
+ #[inline]
+ #[allow(trivial_numeric_casts)]
+ fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+ if *start < *end {
+ // Note: We assume $t <= isize here
+ // Use .wrapping_sub and cast to usize to compute the
+ // difference that may not fit inside the range of isize.
+ Option::Some((*end as isize).wrapping_sub(*start as isize) as usize)
+ } else {
+ Option::Some(0)
+ }
+ }
+
+ #[inline]
+ #[allow(unreachable_patterns)]
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ match <$unsigned>::try_from(n) {
+ Result::Ok(n_as_unsigned) => {
+ // Wrapping in unsigned space handles cases like
+ // `-120_i8.add_usize(200) == Option::Some(80_i8)`,
+ // even though 200_usize is out of range for i8.
+ let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t;
+ if wrapped >= *self {
+ Option::Some(wrapped)
+ } else {
+ Option::None // Addition overflowed
+ }
+ }
+ Result::Err(_) => Option::None,
+ }
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+
+macro_rules! step_impl_no_between {
+ ($($t:ty)*) => ($(
+ impl Step for $t {
+ #[inline]
+ fn steps_between(_start: &Self, _end: &Self) -> Option<usize> {
+ Option::None
+ }
+
+ #[inline]
+ fn add_usize(&self, n: usize) -> Option<Self> {
+ self.checked_add(n as $t)
+ }
+
+ step_identical_methods!();
+ }
+ )*)
+}
+
+step_impl_unsigned!(usize);
+
+pub trait Iterator {
+ type Item;
+
+ #[lang = "next"]
+ fn next(&mut self) -> Option<Self::Item>;
+}
+
+impl<A: Step> Iterator for Range<A> {
+ type Item = A;
+
+ fn next(&mut self) -> Option<A> {
+ if self.start < self.end {
+ // We check for overflow here, even though it can't actually
+ // happen. Adding this check does however help llvm vectorize loops
+ // for some ranges that don't get vectorized otherwise,
+ // and this won't actually result in an extra check in an optimized build.
+ match self.start.add_usize(1) {
+ Option::Some(mut n) => {
+ mem::swap(&mut n, &mut self.start);
+ Option::Some(n)
+ }
+ Option::None => Option::None,
+ }
+ } else {
+ Option::None
+ }
+ }
+}
+
+pub trait IntoIterator {
+ type Item;
+
+ type IntoIter: Iterator<Item = Self::Item>;
+
+ #[lang = "into_iter"]
+ fn into_iter(self) -> Self::IntoIter;
+}
+
+impl<I: Iterator> IntoIterator for I {
+ type Item = I::Item;
+ type IntoIter = I;
+
+ fn into_iter(self) -> I {
+ self
+ }
+}
+
+pub fn main() -> i32 {
+ let a = 1usize..3usize;
+
+ for i in a {
+ unsafe { printf("loop%d\n\0" as *const str as *const i8, i); }
+ }
+
+ 0
+}