authorPierre-Emmanuel Patry <pierre-emmanuel.patry@embecosm.com>2025-03-20 15:10:07 +0100
committerP-E-P <32375388+P-E-P@users.noreply.github.com>2025-03-24 13:15:51 +0000
commit1d4306f7bccfaebc4eb3a24239af15d64e379f7c (patch)
treed1a7178df40701f03cef4c3851824f12cc959084 /gcc
parentbb08a82b12a39a5ed2f7e178cfcf01081230b047 (diff)
Fix testcase module paths
These tests come from libcore, where a use declaration imports these modules; that import was missing from the inlined copies, so the short module paths did not resolve. Use complete paths from the crate root instead.

gcc/testsuite/ChangeLog:

	* rust/compile/issue-2330.rs: Use complete path from crate root.
	* rust/compile/issue-1901.rs: Likewise.
	* rust/compile/issue-1981.rs: Likewise.
	* rust/compile/iterators1.rs: Likewise.
	* rust/compile/sizeof-stray-infer-var-bug.rs: Likewise.
	* rust/compile/for-loop1.rs: Likewise.
	* rust/compile/for-loop2.rs: Likewise.
	* rust/compile/torture/builtin_abort.rs: Likewise.
	* rust/compile/torture/uninit-intrinsic-1.rs: Likewise.

Signed-off-by: Pierre-Emmanuel Patry <pierre-emmanuel.patry@embecosm.com>
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/testsuite/rust/compile/for-loop1.rs                   | 60
-rw-r--r--  gcc/testsuite/rust/compile/for-loop2.rs                   | 66
-rw-r--r--  gcc/testsuite/rust/compile/issue-1901.rs                  |  4
-rw-r--r--  gcc/testsuite/rust/compile/issue-1981.rs                  | 40
-rw-r--r--  gcc/testsuite/rust/compile/issue-2330.rs                  | 38
-rw-r--r--  gcc/testsuite/rust/compile/iterators1.rs                  | 58
-rw-r--r--  gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs  |  2
-rw-r--r--  gcc/testsuite/rust/compile/torture/builtin_abort.rs       |  4
-rw-r--r--  gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs  |  4
9 files changed, 139 insertions(+), 137 deletions(-)
diff --git a/gcc/testsuite/rust/compile/for-loop1.rs b/gcc/testsuite/rust/compile/for-loop1.rs
index 1023ecd..21e0399 100644
--- a/gcc/testsuite/rust/compile/for-loop1.rs
+++ b/gcc/testsuite/rust/compile/for-loop1.rs
@@ -102,30 +102,30 @@ mod ptr {
#[lang = "const_ptr"]
impl<T> *const T {
pub unsafe fn offset(self, count: isize) -> *const T {
- intrinsics::offset(self, count)
+ crate::intrinsics::offset(self, count)
}
}
#[lang = "mut_ptr"]
impl<T> *mut T {
pub unsafe fn offset(self, count: isize) -> *mut T {
- intrinsics::offset(self, count) as *mut T
+ crate::intrinsics::offset(self, count) as *mut T
}
}
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
let x = x as *mut u8;
let y = y as *mut u8;
- let len = mem::size_of::<T>() * count;
+ let len = crate::mem::size_of::<T>() * count;
swap_nonoverlapping_bytes(x, y, len)
}
pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
// For types smaller than the block optimization below,
// just swap directly to avoid pessimizing codegen.
- if mem::size_of::<T>() < 32 {
+ if crate::mem::size_of::<T>() < 32 {
let z = read(x);
- intrinsics::copy_nonoverlapping(y, x, 1);
+ crate::intrinsics::copy_nonoverlapping(y, x, 1);
write(y, z);
} else {
swap_nonoverlapping(x, y, 1);
@@ -133,12 +133,12 @@ mod ptr {
}
pub unsafe fn write<T>(dst: *mut T, src: T) {
- intrinsics::move_val_init(&mut *dst, src)
+ crate::intrinsics::move_val_init(&mut *dst, src)
}
pub unsafe fn read<T>(src: *const T) -> T {
- let mut tmp: T = mem::uninitialized();
- intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+ let mut tmp: T = crate::mem::uninitialized();
+ crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
tmp
}
@@ -146,7 +146,7 @@ mod ptr {
struct Block(u64, u64, u64, u64);
struct UnalignedBlock(u64, u64, u64, u64);
- let block_size = mem::size_of::<Block>();
+ let block_size = crate::mem::size_of::<Block>();
// Loop through x & y, copying them `Block` at a time
// The optimizer should unroll the loop fully for most types
@@ -155,31 +155,31 @@ mod ptr {
while i + block_size <= len {
// Create some uninitialized memory as scratch space
// Declaring `t` here avoids aligning the stack when this loop is unused
- let mut t: Block = mem::uninitialized();
+ let mut t: Block = crate::mem::uninitialized();
let t = &mut t as *mut _ as *mut u8;
let x = x.offset(i as isize);
let y = y.offset(i as isize);
// Swap a block of bytes of x & y, using t as a temporary buffer
// This should be optimized into efficient SIMD operations where available
- intrinsics::copy_nonoverlapping(x, t, block_size);
- intrinsics::copy_nonoverlapping(y, x, block_size);
- intrinsics::copy_nonoverlapping(t, y, block_size);
+ crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+ crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+ crate::intrinsics::copy_nonoverlapping(t, y, block_size);
i += block_size;
}
if i < len {
// Swap any remaining bytes
- let mut t: UnalignedBlock = mem::uninitialized();
+ let mut t: UnalignedBlock = crate::mem::uninitialized();
let rem = len - i;
let t = &mut t as *mut _ as *mut u8;
let x = x.offset(i as isize);
let y = y.offset(i as isize);
- intrinsics::copy_nonoverlapping(x, t, rem);
- intrinsics::copy_nonoverlapping(y, x, rem);
- intrinsics::copy_nonoverlapping(t, y, rem);
+ crate::intrinsics::copy_nonoverlapping(x, t, rem);
+ crate::intrinsics::copy_nonoverlapping(y, x, rem);
+ crate::intrinsics::copy_nonoverlapping(t, y, rem);
}
}
}
@@ -194,7 +194,7 @@ mod mem {
pub fn swap<T>(x: &mut T, y: &mut T) {
unsafe {
- ptr::swap_nonoverlapping_one(x, y);
+ crate::ptr::swap_nonoverlapping_one(x, y);
}
}
@@ -204,7 +204,7 @@ mod mem {
}
pub unsafe fn uninitialized<T>() -> T {
- intrinsics::uninit()
+ crate::intrinsics::uninit()
}
}
@@ -214,25 +214,25 @@ macro_rules! impl_uint {
impl $ty {
pub fn wrapping_add(self, rhs: Self) -> Self {
unsafe {
- intrinsics::wrapping_add(self, rhs)
+ crate::intrinsics::wrapping_add(self, rhs)
}
}
pub fn wrapping_sub(self, rhs: Self) -> Self {
unsafe {
- intrinsics::wrapping_sub(self, rhs)
+ crate::intrinsics::wrapping_sub(self, rhs)
}
}
pub fn rotate_left(self, n: u32) -> Self {
unsafe {
- intrinsics::rotate_left(self, n as Self)
+ crate::intrinsics::rotate_left(self, n as Self)
}
}
pub fn rotate_right(self, n: u32) -> Self {
unsafe {
- intrinsics::rotate_right(self, n as Self)
+ crate::intrinsics::rotate_right(self, n as Self)
}
}
@@ -243,7 +243,7 @@ macro_rules! impl_uint {
}
}
- pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ pub const fn from_le_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
@@ -254,8 +254,8 @@ macro_rules! impl_uint {
}
}
- pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
- unsafe { mem::transmute(bytes) }
+ pub const fn from_ne_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
+ unsafe { crate::mem::transmute(bytes) }
}
pub fn checked_add(self, rhs: Self) -> Option<Self> {
@@ -268,7 +268,7 @@ macro_rules! impl_uint {
}
pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
- let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+ let (a, b) = unsafe { crate::intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
(a as Self, b)
}
}
@@ -384,12 +384,12 @@ macro_rules! step_identical_methods {
() => {
#[inline]
fn replace_one(&mut self) -> Self {
- mem::replace(self, 1)
+ crate::mem::replace(self, 1)
}
#[inline]
fn replace_zero(&mut self) -> Self {
- mem::replace(self, 0)
+ crate::mem::replace(self, 0)
}
#[inline]
@@ -505,7 +505,7 @@ impl<A: Step> Iterator for Range<A> {
// and this won't actually result in an extra check in an optimized build.
match self.start.add_usize(1) {
Option::Some(mut n) => {
- mem::swap(&mut n, &mut self.start);
+ crate::mem::swap(&mut n, &mut self.start);
Option::Some(n)
}
Option::None => Option::None,
diff --git a/gcc/testsuite/rust/compile/for-loop2.rs b/gcc/testsuite/rust/compile/for-loop2.rs
index d18bddd..a0ad066 100644
--- a/gcc/testsuite/rust/compile/for-loop2.rs
+++ b/gcc/testsuite/rust/compile/for-loop2.rs
@@ -102,30 +102,30 @@ mod ptr {
#[lang = "const_ptr"]
impl<T> *const T {
pub unsafe fn offset(self, count: isize) -> *const T {
- intrinsics::offset(self, count)
+ crate::intrinsics::offset(self, count)
}
}
#[lang = "mut_ptr"]
impl<T> *mut T {
pub unsafe fn offset(self, count: isize) -> *mut T {
- intrinsics::offset(self, count) as *mut T
+ crate::intrinsics::offset(self, count) as *mut T
}
}
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
let x = x as *mut u8;
let y = y as *mut u8;
- let len = mem::size_of::<T>() * count;
+ let len = crate::mem::size_of::<T>() * count;
swap_nonoverlapping_bytes(x, y, len)
}
pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
// For types smaller than the block optimization below,
// just swap directly to avoid pessimizing codegen.
- if mem::size_of::<T>() < 32 {
+ if crate::mem::size_of::<T>() < 32 {
let z = read(x);
- intrinsics::copy_nonoverlapping(y, x, 1);
+ crate::intrinsics::copy_nonoverlapping(y, x, 1);
write(y, z);
} else {
swap_nonoverlapping(x, y, 1);
@@ -133,12 +133,12 @@ mod ptr {
}
pub unsafe fn write<T>(dst: *mut T, src: T) {
- intrinsics::move_val_init(&mut *dst, src)
+ crate::intrinsics::move_val_init(&mut *dst, src)
}
pub unsafe fn read<T>(src: *const T) -> T {
- let mut tmp: T = mem::uninitialized();
- intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+ let mut tmp: T = crate::mem::uninitialized();
+ crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
tmp
}
@@ -146,7 +146,7 @@ mod ptr {
struct Block(u64, u64, u64, u64);
struct UnalignedBlock(u64, u64, u64, u64);
- let block_size = mem::size_of::<Block>();
+ let block_size = crate::mem::size_of::<Block>();
// Loop through x & y, copying them `Block` at a time
// The optimizer should unroll the loop fully for most types
@@ -155,31 +155,31 @@ mod ptr {
while i + block_size <= len {
// Create some uninitialized memory as scratch space
// Declaring `t` here avoids aligning the stack when this loop is unused
- let mut t: Block = mem::uninitialized();
+ let mut t: Block = crate::mem::uninitialized();
let t = &mut t as *mut _ as *mut u8;
let x = x.offset(i as isize);
let y = y.offset(i as isize);
// Swap a block of bytes of x & y, using t as a temporary buffer
// This should be optimized into efficient SIMD operations where available
- intrinsics::copy_nonoverlapping(x, t, block_size);
- intrinsics::copy_nonoverlapping(y, x, block_size);
- intrinsics::copy_nonoverlapping(t, y, block_size);
+ crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+ crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+ crate::intrinsics::copy_nonoverlapping(t, y, block_size);
i += block_size;
}
if i < len {
// Swap any remaining bytes
- let mut t: UnalignedBlock = mem::uninitialized();
+ let mut t: UnalignedBlock = crate::mem::uninitialized();
let rem = len - i;
let t = &mut t as *mut _ as *mut u8;
let x = x.offset(i as isize);
let y = y.offset(i as isize);
- intrinsics::copy_nonoverlapping(x, t, rem);
- intrinsics::copy_nonoverlapping(y, x, rem);
- intrinsics::copy_nonoverlapping(t, y, rem);
+ crate::intrinsics::copy_nonoverlapping(x, t, rem);
+ crate::intrinsics::copy_nonoverlapping(y, x, rem);
+ crate::intrinsics::copy_nonoverlapping(t, y, rem);
}
}
}
@@ -194,7 +194,7 @@ mod mem {
pub fn swap<T>(x: &mut T, y: &mut T) {
unsafe {
- ptr::swap_nonoverlapping_one(x, y);
+ crate::ptr::swap_nonoverlapping_one(x, y);
}
}
@@ -204,7 +204,7 @@ mod mem {
}
pub unsafe fn uninitialized<T>() -> T {
- intrinsics::uninit()
+ crate::intrinsics::uninit()
}
}
@@ -214,25 +214,25 @@ macro_rules! impl_uint {
impl $ty {
pub fn wrapping_add(self, rhs: Self) -> Self {
unsafe {
- intrinsics::wrapping_add(self, rhs)
+ crate::intrinsics::wrapping_add(self, rhs)
}
}
pub fn wrapping_sub(self, rhs: Self) -> Self {
unsafe {
- intrinsics::wrapping_sub(self, rhs)
+ crate::intrinsics::wrapping_sub(self, rhs)
}
}
pub fn rotate_left(self, n: u32) -> Self {
unsafe {
- intrinsics::rotate_left(self, n as Self)
+ crate::intrinsics::rotate_left(self, n as Self)
}
}
pub fn rotate_right(self, n: u32) -> Self {
unsafe {
- intrinsics::rotate_right(self, n as Self)
+ crate::intrinsics::rotate_right(self, n as Self)
}
}
@@ -243,7 +243,7 @@ macro_rules! impl_uint {
}
}
- pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ pub const fn from_le_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
@@ -254,8 +254,8 @@ macro_rules! impl_uint {
}
}
- pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
- unsafe { mem::transmute(bytes) }
+ pub const fn from_ne_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
+ unsafe { crate::mem::transmute(bytes) }
}
pub fn checked_add(self, rhs: Self) -> Option<Self> {
@@ -268,7 +268,7 @@ macro_rules! impl_uint {
}
pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
- let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+ let (a, b) = unsafe { crate::intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
(a as Self, b)
}
}
@@ -384,12 +384,12 @@ macro_rules! step_identical_methods {
() => {
#[inline]
fn replace_one(&mut self) -> Self {
- mem::replace(self, 1)
+ crate::mem::replace(self, 1)
}
#[inline]
fn replace_zero(&mut self) -> Self {
- mem::replace(self, 0)
+ crate::mem::replace(self, 0)
}
#[inline]
@@ -505,7 +505,7 @@ impl<A: Step> Iterator for Range<A> {
// and this won't actually result in an extra check in an optimized build.
match self.start.add_usize(1) {
Option::Some(mut n) => {
- mem::swap(&mut n, &mut self.start);
+ crate::mem::swap(&mut n, &mut self.start);
Option::Some(n)
}
Option::None => Option::None,
@@ -536,10 +536,12 @@ impl<I: Iterator> IntoIterator for I {
pub fn main() {
// make sure we can desugar for-loops inside other blocks
-
+
if true {
for _ in 20usize..40usize {
- unsafe { puts("loop\0" as *const str as *const i8); }
+ unsafe {
+ puts("loop\0" as *const str as *const i8);
+ }
}
}
}
diff --git a/gcc/testsuite/rust/compile/issue-1901.rs b/gcc/testsuite/rust/compile/issue-1901.rs
index cfd8ef4..b43e34f 100644
--- a/gcc/testsuite/rust/compile/issue-1901.rs
+++ b/gcc/testsuite/rust/compile/issue-1901.rs
@@ -13,14 +13,14 @@ mod ptr {
#[lang = "const_ptr"]
impl<T> *const T {
pub unsafe fn offset(self, count: isize) -> *const T {
- intrinsics::offset(self, count)
+ crate::intrinsics::offset(self, count)
}
}
#[lang = "mut_ptr"]
impl<T> *mut T {
pub unsafe fn offset(self, count: isize) -> *mut T {
- intrinsics::offset(self, count) as *mut T
+ crate::intrinsics::offset(self, count) as *mut T
}
}
}
diff --git a/gcc/testsuite/rust/compile/issue-1981.rs b/gcc/testsuite/rust/compile/issue-1981.rs
index bfd8d2c..de9588c 100644
--- a/gcc/testsuite/rust/compile/issue-1981.rs
+++ b/gcc/testsuite/rust/compile/issue-1981.rs
@@ -16,30 +16,30 @@ mod ptr {
#[lang = "const_ptr"]
impl<T> *const T {
pub unsafe fn offset(self, count: isize) -> *const T {
- intrinsics::offset(self, count)
+ crate::intrinsics::offset(self, count)
}
}
#[lang = "mut_ptr"]
impl<T> *mut T {
pub unsafe fn offset(self, count: isize) -> *mut T {
- intrinsics::offset(self, count) as *mut T
+ crate::intrinsics::offset(self, count) as *mut T
}
}
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
let x = x as *mut u8;
let y = y as *mut u8;
- let len = mem::size_of::<T>() * count;
+ let len = crate::mem::size_of::<T>() * count;
swap_nonoverlapping_bytes(x, y, len)
}
pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
// For types smaller than the block optimization below,
// just swap directly to avoid pessimizing codegen.
- if mem::size_of::<T>() < 32 {
+ if crate::mem::size_of::<T>() < 32 {
let z = read(x);
- intrinsics::copy_nonoverlapping(y, x, 1);
+ crate::intrinsics::copy_nonoverlapping(y, x, 1);
write(y, z);
} else {
swap_nonoverlapping(x, y, 1);
@@ -47,12 +47,12 @@ mod ptr {
}
pub unsafe fn write<T>(dst: *mut T, src: T) {
- intrinsics::move_val_init(&mut *dst, src)
+ crate::intrinsics::move_val_init(&mut *dst, src)
}
pub unsafe fn read<T>(src: *const T) -> T {
- let mut tmp: T = mem::uninitialized();
- intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+ let mut tmp: T = crate::mem::uninitialized();
+ crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
tmp
}
@@ -60,7 +60,7 @@ mod ptr {
struct Block(u64, u64, u64, u64);
struct UnalignedBlock(u64, u64, u64, u64);
- let block_size = mem::size_of::<Block>();
+ let block_size = crate::mem::size_of::<Block>();
// Loop through x & y, copying them `Block` at a time
// The optimizer should unroll the loop fully for most types
@@ -69,31 +69,31 @@ mod ptr {
while i + block_size <= len {
// Create some uninitialized memory as scratch space
// Declaring `t` here avoids aligning the stack when this loop is unused
- let mut t: Block = mem::uninitialized();
+ let mut t: Block = crate::mem::uninitialized();
let t = &mut t as *mut _ as *mut u8;
let x = x.offset(i as isize);
let y = y.offset(i as isize);
// Swap a block of bytes of x & y, using t as a temporary buffer
// This should be optimized into efficient SIMD operations where available
- intrinsics::copy_nonoverlapping(x, t, block_size);
- intrinsics::copy_nonoverlapping(y, x, block_size);
- intrinsics::copy_nonoverlapping(t, y, block_size);
+ crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+ crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+ crate::intrinsics::copy_nonoverlapping(t, y, block_size);
i += block_size;
}
if i < len {
// Swap any remaining bytes
- let mut t: UnalignedBlock = mem::uninitialized();
+ let mut t: UnalignedBlock = crate::mem::uninitialized();
let rem = len - i;
let t = &mut t as *mut _ as *mut u8;
let x = x.offset(i as isize);
let y = y.offset(i as isize);
- intrinsics::copy_nonoverlapping(x, t, rem);
- intrinsics::copy_nonoverlapping(y, x, rem);
- intrinsics::copy_nonoverlapping(t, y, rem);
+ crate::intrinsics::copy_nonoverlapping(x, t, rem);
+ crate::intrinsics::copy_nonoverlapping(y, x, rem);
+ crate::intrinsics::copy_nonoverlapping(t, y, rem);
}
}
}
@@ -106,7 +106,7 @@ mod mem {
pub fn swap<T>(x: &mut T, y: &mut T) {
unsafe {
- ptr::swap_nonoverlapping_one(x, y);
+ crate::ptr::swap_nonoverlapping_one(x, y);
}
}
@@ -116,7 +116,7 @@ mod mem {
}
pub unsafe fn uninitialized<T>() -> T {
- intrinsics::uninit()
+ crate::intrinsics::uninit()
}
}
@@ -126,7 +126,7 @@ trait Step {
impl Step for i32 {
fn replace_zero(&mut self) -> Self {
- mem::replace(self, 0)
+ crate::mem::replace(self, 0)
}
}
diff --git a/gcc/testsuite/rust/compile/issue-2330.rs b/gcc/testsuite/rust/compile/issue-2330.rs
index 97c1503..6ab46c7 100644
--- a/gcc/testsuite/rust/compile/issue-2330.rs
+++ b/gcc/testsuite/rust/compile/issue-2330.rs
@@ -95,30 +95,30 @@ mod ptr {
#[lang = "const_ptr"]
impl<T> *const T {
pub unsafe fn offset(self, count: isize) -> *const T {
- intrinsics::offset(self, count)
+ crate::intrinsics::offset(self, count)
}
}
#[lang = "mut_ptr"]
impl<T> *mut T {
pub unsafe fn offset(self, count: isize) -> *mut T {
- intrinsics::offset(self, count) as *mut T
+ crate::intrinsics::offset(self, count) as *mut T
}
}
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
let x = x as *mut u8;
let y = y as *mut u8;
- let len = mem::size_of::<T>() * count;
+ let len = crate::mem::size_of::<T>() * count;
swap_nonoverlapping_bytes(x, y, len)
}
pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
// For types smaller than the block optimization below,
// just swap directly to avoid pessimizing codegen.
- if mem::size_of::<T>() < 32 {
+ if crate::mem::size_of::<T>() < 32 {
let z = read(x);
- intrinsics::copy_nonoverlapping(y, x, 1);
+ crate::intrinsics::copy_nonoverlapping(y, x, 1);
write(y, z);
} else {
swap_nonoverlapping(x, y, 1);
@@ -126,12 +126,12 @@ mod ptr {
}
pub unsafe fn write<T>(dst: *mut T, src: T) {
- intrinsics::move_val_init(&mut *dst, src)
+ crate::intrinsics::move_val_init(&mut *dst, src)
}
pub unsafe fn read<T>(src: *const T) -> T {
- let mut tmp: T = mem::uninitialized();
- intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+ let mut tmp: T = crate::mem::uninitialized();
+ crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
tmp
}
@@ -139,7 +139,7 @@ mod ptr {
struct Block(u64, u64, u64, u64);
struct UnalignedBlock(u64, u64, u64, u64);
- let block_size = mem::size_of::<Block>();
+ let block_size = crate::mem::size_of::<Block>();
// Loop through x & y, copying them `Block` at a time
// The optimizer should unroll the loop fully for most types
@@ -148,31 +148,31 @@ mod ptr {
while i + block_size <= len {
// Create some uninitialized memory as scratch space
// Declaring `t` here avoids aligning the stack when this loop is unused
- let mut t: Block = mem::uninitialized();
+ let mut t: Block = crate::mem::uninitialized();
let t = &mut t as *mut _ as *mut u8;
let x = x.offset(i as isize);
let y = y.offset(i as isize);
// Swap a block of bytes of x & y, using t as a temporary buffer
// This should be optimized into efficient SIMD operations where available
- intrinsics::copy_nonoverlapping(x, t, block_size);
- intrinsics::copy_nonoverlapping(y, x, block_size);
- intrinsics::copy_nonoverlapping(t, y, block_size);
+ crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+ crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+ crate::intrinsics::copy_nonoverlapping(t, y, block_size);
i += block_size;
}
if i < len {
// Swap any remaining bytes
- let mut t: UnalignedBlock = mem::uninitialized();
+ let mut t: UnalignedBlock = crate::mem::uninitialized();
let rem = len - i;
let t = &mut t as *mut _ as *mut u8;
let x = x.offset(i as isize);
let y = y.offset(i as isize);
- intrinsics::copy_nonoverlapping(x, t, rem);
- intrinsics::copy_nonoverlapping(y, x, rem);
- intrinsics::copy_nonoverlapping(t, y, rem);
+ crate::intrinsics::copy_nonoverlapping(x, t, rem);
+ crate::intrinsics::copy_nonoverlapping(y, x, rem);
+ crate::intrinsics::copy_nonoverlapping(t, y, rem);
}
}
}
@@ -185,7 +185,7 @@ mod mem {
pub fn swap<T>(x: &mut T, y: &mut T) {
unsafe {
- ptr::swap_nonoverlapping_one(x, y);
+ crate::ptr::swap_nonoverlapping_one(x, y);
}
}
@@ -195,6 +195,6 @@ mod mem {
}
pub unsafe fn uninitialized<T>() -> T {
- intrinsics::uninit()
+ crate::intrinsics::uninit()
}
}
diff --git a/gcc/testsuite/rust/compile/iterators1.rs b/gcc/testsuite/rust/compile/iterators1.rs
index 1141758..2ea3d74 100644
--- a/gcc/testsuite/rust/compile/iterators1.rs
+++ b/gcc/testsuite/rust/compile/iterators1.rs
@@ -98,30 +98,30 @@ mod ptr {
#[lang = "const_ptr"]
impl<T> *const T {
pub unsafe fn offset(self, count: isize) -> *const T {
- intrinsics::offset(self, count)
+ crate::intrinsics::offset(self, count)
}
}
#[lang = "mut_ptr"]
impl<T> *mut T {
pub unsafe fn offset(self, count: isize) -> *mut T {
- intrinsics::offset(self, count) as *mut T
+ crate::intrinsics::offset(self, count) as *mut T
}
}
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
let x = x as *mut u8;
let y = y as *mut u8;
- let len = mem::size_of::<T>() * count;
+ let len = crate::mem::size_of::<T>() * count;
swap_nonoverlapping_bytes(x, y, len)
}
pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
// For types smaller than the block optimization below,
// just swap directly to avoid pessimizing codegen.
- if mem::size_of::<T>() < 32 {
+ if crate::mem::size_of::<T>() < 32 {
let z = read(x);
- intrinsics::copy_nonoverlapping(y, x, 1);
+ crate::intrinsics::copy_nonoverlapping(y, x, 1);
write(y, z);
} else {
swap_nonoverlapping(x, y, 1);
@@ -129,12 +129,12 @@ mod ptr {
}
pub unsafe fn write<T>(dst: *mut T, src: T) {
- intrinsics::move_val_init(&mut *dst, src)
+ crate::intrinsics::move_val_init(&mut *dst, src)
}
pub unsafe fn read<T>(src: *const T) -> T {
- let mut tmp: T = mem::uninitialized();
- intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+ let mut tmp: T = crate::mem::uninitialized();
+ crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
tmp
}
@@ -142,7 +142,7 @@ mod ptr {
struct Block(u64, u64, u64, u64);
struct UnalignedBlock(u64, u64, u64, u64);
- let block_size = mem::size_of::<Block>();
+ let block_size = crate::mem::size_of::<Block>();
// Loop through x & y, copying them `Block` at a time
// The optimizer should unroll the loop fully for most types
@@ -151,31 +151,31 @@ mod ptr {
while i + block_size <= len {
// Create some uninitialized memory as scratch space
// Declaring `t` here avoids aligning the stack when this loop is unused
- let mut t: Block = mem::uninitialized();
+ let mut t: Block = crate::mem::uninitialized();
let t = &mut t as *mut _ as *mut u8;
let x = x.offset(i as isize);
let y = y.offset(i as isize);
// Swap a block of bytes of x & y, using t as a temporary buffer
// This should be optimized into efficient SIMD operations where available
- intrinsics::copy_nonoverlapping(x, t, block_size);
- intrinsics::copy_nonoverlapping(y, x, block_size);
- intrinsics::copy_nonoverlapping(t, y, block_size);
+ crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+ crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+ crate::intrinsics::copy_nonoverlapping(t, y, block_size);
i += block_size;
}
if i < len {
// Swap any remaining bytes
- let mut t: UnalignedBlock = mem::uninitialized();
+ let mut t: UnalignedBlock = crate::mem::uninitialized();
let rem = len - i;
let t = &mut t as *mut _ as *mut u8;
let x = x.offset(i as isize);
let y = y.offset(i as isize);
- intrinsics::copy_nonoverlapping(x, t, rem);
- intrinsics::copy_nonoverlapping(y, x, rem);
- intrinsics::copy_nonoverlapping(t, y, rem);
+ crate::intrinsics::copy_nonoverlapping(x, t, rem);
+ crate::intrinsics::copy_nonoverlapping(y, x, rem);
+ crate::intrinsics::copy_nonoverlapping(t, y, rem);
}
}
}
@@ -190,7 +190,7 @@ mod mem {
pub fn swap<T>(x: &mut T, y: &mut T) {
unsafe {
- ptr::swap_nonoverlapping_one(x, y);
+ crate::ptr::swap_nonoverlapping_one(x, y);
}
}
@@ -200,7 +200,7 @@ mod mem {
}
pub unsafe fn uninitialized<T>() -> T {
- intrinsics::uninit()
+ crate::intrinsics::uninit()
}
}
@@ -210,30 +210,30 @@ macro_rules! impl_uint {
impl $ty {
pub fn wrapping_add(self, rhs: Self) -> Self {
unsafe {
- intrinsics::wrapping_add(self, rhs)
+ crate::intrinsics::wrapping_add(self, rhs)
}
}
pub fn wrapping_sub(self, rhs: Self) -> Self {
unsafe {
- intrinsics::wrapping_sub(self, rhs)
+ crate::intrinsics::wrapping_sub(self, rhs)
}
}
pub fn rotate_left(self, n: u32) -> Self {
unsafe {
- intrinsics::rotate_left(self, n as Self)
+ crate::intrinsics::rotate_left(self, n as Self)
}
}
pub fn rotate_right(self, n: u32) -> Self {
unsafe {
- intrinsics::rotate_right(self, n as Self)
+ crate::intrinsics::rotate_right(self, n as Self)
}
}
- pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
- unsafe { mem::transmute(bytes) }
+ pub const fn from_ne_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
+ unsafe { crate::mem::transmute(bytes) }
}
pub fn checked_add(self, rhs: Self) -> Option<Self> {
@@ -246,7 +246,7 @@ macro_rules! impl_uint {
}
pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
- let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+ let (a, b) = unsafe { crate::intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
(a as Self, b)
}
}
@@ -362,12 +362,12 @@ macro_rules! step_identical_methods {
() => {
#[inline]
fn replace_one(&mut self) -> Self {
- mem::replace(self, 1)
+ crate::mem::replace(self, 1)
}
#[inline]
fn replace_zero(&mut self) -> Self {
- mem::replace(self, 0)
+ crate::mem::replace(self, 0)
}
#[inline]
@@ -482,7 +482,7 @@ impl<A: Step> Iterator for Range<A> {
// and this won't actually result in an extra check in an optimized build.
match self.start.add_usize(1) {
Option::Some(mut n) => {
- mem::swap(&mut n, &mut self.start);
+ crate::mem::swap(&mut n, &mut self.start);
Option::Some(n)
}
Option::None => Option::None,
diff --git a/gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs b/gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs
index 8275691..c46a97d 100644
--- a/gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs
+++ b/gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs
@@ -14,6 +14,6 @@ mod ptr {
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
let x = x as *mut T;
let y = y as *mut T;
- let len = mem::size_of::<T>() * count;
+ let len = crate::mem::size_of::<T>() * count;
}
}
diff --git a/gcc/testsuite/rust/compile/torture/builtin_abort.rs b/gcc/testsuite/rust/compile/torture/builtin_abort.rs
index 3112cdc..919caa4 100644
--- a/gcc/testsuite/rust/compile/torture/builtin_abort.rs
+++ b/gcc/testsuite/rust/compile/torture/builtin_abort.rs
@@ -12,7 +12,7 @@ mod intrinsics {
}
}
-pub fn main () -> i32 {
- abort();
+pub fn main() -> i32 {
+ crate::intrinsics::abort();
0
}
diff --git a/gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs b/gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs
index fa329c6..af1cb54 100644
--- a/gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs
+++ b/gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs
@@ -11,7 +11,7 @@ mod intrinsics {
mod mem {
pub unsafe fn uninitialized<T>() -> T {
- intrinsics::uninit()
+ crate::intrinsics::uninit()
}
}
@@ -21,6 +21,6 @@ struct Foo(i32, i32);
impl Foo {
pub fn new() -> Self {
- unsafe { mem::uninitialized::<Foo>() }
+ unsafe { crate::mem::uninitialized::<Foo>() }
}
}