; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=mvp -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s

; This test ensures that loads and stores generated for small memcpy et al use
; constant offset folding.
;
; Each intrinsic below copies/sets 16 bytes at align 1, so the backend expands
; it inline into two i64 (8-byte) memory operations. The checks verify two
; properties of that expansion:
;   * the constant offsets 0 and 8 are folded into the load/store immediates
;     (e.g. "i64.store 8($0)") rather than computed with explicit adds, and
;   * the align-1 source/dest is honored via ":p2align=0" (unaligned access).

target triple = "wasm32-unknown-unknown"

; 16-byte memset(dst, 0, 16): two i64.const 0 / i64.store pairs at offsets
; 8 and 0, both marked p2align=0.
define void @call_memset(ptr) #0 {
; CHECK-LABEL: call_memset:
; CHECK:         .functype call_memset (i32) -> ()
; CHECK-NEXT:  # %bb.0:
; CHECK-NEXT:    i64.const $push0=, 0
; CHECK-NEXT:    i64.store 8($0):p2align=0, $pop0
; CHECK-NEXT:    i64.const $push1=, 0
; CHECK-NEXT:    i64.store 0($0):p2align=0, $pop1
; CHECK-NEXT:    # fallthrough-return
  call void @llvm.memset.p0.i32(ptr align 1 %0, i8 0, i32 16, i1 false)
  ret void
}

; 16-byte memcpy(dst, src, 16): two load/store pairs, each load immediately
; followed by its store, with offsets 8 and 0 folded into the instructions.
define void @call_memcpy(ptr %dst, ptr %src) #0 {
; CHECK-LABEL: call_memcpy:
; CHECK:         .functype call_memcpy (i32, i32) -> ()
; CHECK-NEXT:  # %bb.0:
; CHECK-NEXT:    i64.load $push0=, 8($1):p2align=0
; CHECK-NEXT:    i64.store 8($0):p2align=0, $pop0
; CHECK-NEXT:    i64.load $push1=, 0($1):p2align=0
; CHECK-NEXT:    i64.store 0($0):p2align=0, $pop1
; CHECK-NEXT:    # fallthrough-return
  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %src, i32 16, i1 false)
  ret void
}

; 16-byte memmove(dst, src, 16): unlike memcpy, both loads are issued before
; either store (memmove must tolerate overlapping src/dst), so one loaded
; value is parked in register $2. Offsets are still folded as immediates.
define void @call_memmove(ptr %dst, ptr %src) #0 {
; CHECK-LABEL: call_memmove:
; CHECK:         .functype call_memmove (i32, i32) -> ()
; CHECK-NEXT:  # %bb.0:
; CHECK-NEXT:    i64.load $2=, 0($1):p2align=0
; CHECK-NEXT:    i64.load $push0=, 8($1):p2align=0
; CHECK-NEXT:    i64.store 8($0):p2align=0, $pop0
; CHECK-NEXT:    i64.store 0($0):p2align=0, $2
; CHECK-NEXT:    # fallthrough-return
  call void @llvm.memmove.p0.p0.i32(ptr align 1 %dst, ptr align 1 %src, i32 16, i1 false)
  ret void
}