Diffstat (limited to 'llvm/test')
594 files changed, 11556 insertions, 11852 deletions
diff --git a/llvm/test/Analysis/BasicAA/modref.ll b/llvm/test/Analysis/BasicAA/modref.ll
index 1aab28f3..4a91fee 100644
--- a/llvm/test/Analysis/BasicAA/modref.ll
+++ b/llvm/test/Analysis/BasicAA/modref.ll
@@ -2,7 +2,7 @@
 ; RUN: opt < %s -aa-pipeline=basic-aa -passes=gvn,dse -S | FileCheck %s
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare void @external(ptr)
@@ -71,7 +71,7 @@ define void @test3(i8 %X) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8
 ; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, ptr [[P]], i32 2
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
 ; CHECK-NEXT: store i8 2, ptr [[P2]], align 1
 ; CHECK-NEXT: call void @external(ptr [[P]])
 ; CHECK-NEXT: ret void
@@ -81,7 +81,7 @@ define void @test3(i8 %X) {
 %P2 = getelementptr i8, ptr %P, i32 2
 store i8 %Y, ptr %P2 ;; Not read by lifetime.end, should be removed.
-  call void @llvm.lifetime.end.p0(i64 1, ptr %P)
+  call void @llvm.lifetime.end.p0(ptr %P)
 store i8 2, ptr %P2
 call void @external(ptr %P)
 ret void
@@ -90,7 +90,7 @@ define void @test3(i8 %X) {
 define void @test3a(i8 %X) {
 ; CHECK-LABEL: @test3a(
 ; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 10, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
 ; CHECK-NEXT: ret void
 ;
 %P = alloca i64
@@ -98,7 +98,7 @@ define void @test3a(i8 %X) {
 %P2 = getelementptr i8, ptr %P, i32 2
 store i8 %Y, ptr %P2
-  call void @llvm.lifetime.end.p0(i64 10, ptr %P)
+  call void @llvm.lifetime.end.p0(ptr %P)
 ret void
 }
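The lifetime hunks above, and the two files that follow, all track the same IR change: @llvm.lifetime.start.p0 and @llvm.lifetime.end.p0 no longer take an explicit i64 size operand, only the alloca pointer. A minimal sketch of the new form (the function @demo and its body are illustrative, not taken from these tests):

; Sketch: lifetime markers after the size operand was dropped.
declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)

define void @demo() {
  %buf = alloca [16 x i8]
  call void @llvm.lifetime.start.p0(ptr %buf) ; was: (i64 16, ptr %buf)
  store i8 0, ptr %buf
  call void @llvm.lifetime.end.p0(ptr %buf)   ; was: (i64 16, ptr %buf)
  ret void
}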
diff --git a/llvm/test/Analysis/BasicAA/phi-values-usage.ll b/llvm/test/Analysis/BasicAA/phi-values-usage.ll
index 43df41c..680e1df 100644
--- a/llvm/test/Analysis/BasicAA/phi-values-usage.ll
+++ b/llvm/test/Analysis/BasicAA/phi-values-usage.ll
@@ -14,7 +14,7 @@ target datalayout = "p:8:8-n8"
 declare void @otherfn(ptr)
 declare i32 @__gxx_personality_v0(...)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 @c = external global ptr, align 1
 ; This function is one where if we didn't free basicaa after memcpyopt then the
@@ -65,7 +65,7 @@ for.body: ; preds = %for.cond
 br label %for.cond
 for.cond.cleanup: ; preds = %for.cond
-  call void @llvm.lifetime.end.p0(i64 1, ptr %a)
+  call void @llvm.lifetime.end.p0(ptr %a)
 %1 = load ptr, ptr %d.0, align 1
 store ptr %1, ptr @c, align 1
 ret void
diff --git a/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll b/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll
index 1c9d201..b93a2a0 100644
--- a/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll
+++ b/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll
@@ -29,7 +29,7 @@ define internal void @used_by_lifetime() {
 entry:
 %a = alloca i8
-  call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+  call void @llvm.lifetime.start.p0(ptr %a)
 ret void
 }
@@ -55,6 +55,6 @@ define internal void @other_cast_intrinsic_use() {
 ret void
 }
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
 declare void @llvm.memset.p0.i64(ptr, i8, i64, i1 immarg)
 declare void @llvm.memset.p1.i64(ptr addrspace(1), i8, i64, i1 immarg)
diff --git a/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll b/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll
index de1b39d..0a154d09 100644
--- a/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll
@@ -34,10 +34,10 @@ define void @fadd() {
 define void @fadd_fp16() {
 ; CHECK-BASE-LABEL: 'fadd_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fadd half undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fadd <4 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fadd <8 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fadd <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fadd half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fadd <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fadd <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:16 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fadd <16 x half> undef, undef
 ; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 ; CHECK-FP16-LABEL: 'fadd_fp16'
@@ -84,10 +84,10 @@ define void @fsub() {
 define void @fsub_fp16() {
 ; CHECK-BASE-LABEL: 'fsub_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fsub half undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fsub <4 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fsub <8 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fsub <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fsub half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fsub <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fsub <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:16 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fsub <16 x half> undef, undef
 ; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 ; CHECK-FP16-LABEL: 'fsub_fp16'
@@ -134,9 +134,9 @@ define void @fneg_idiom() {
 define void @fneg_idiom_fp16() {
 ; CHECK-BASE-LABEL: 'fneg_idiom_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fsub half 0xH8000, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fsub <4 x half> splat (half 0xH8000), undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fsub <8 x half> splat (half 0xH8000), undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:3 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fsub half 0xH8000, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:3 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fsub <4 x half> splat (half 0xH8000), undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:6 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fsub <8 x half> splat (half 0xH8000), undef
 ; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 ; CHECK-FP16-LABEL: 'fneg_idiom_fp16'
@@ -180,21 +180,13 @@ define void @fneg() {
 }
 define void @fneg_fp16() {
-; CHECK-BASE-LABEL: 'fneg_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
-;
-; CHECK-FP16-LABEL: 'fneg_fp16'
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half undef
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> undef
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> undef
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> undef
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> undef
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+; CHECK-LABEL: 'fneg_fp16'
+; CHECK-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half undef
+; CHECK-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> undef
+; CHECK-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> undef
+; CHECK-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> undef
+; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> undef
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 %F16 = fneg half undef
 %V2F16 = fneg <2 x half> undef
@@ -252,16 +244,16 @@ define void @fmulfneg() {
 define void @fmulneg_fp16() {
 ; CHECK-BASE-LABEL: 'fmulneg_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16M = fmul half %F16, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16M = fmul <2 x half> %V2F16, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16M = fmul <4 x half> %V4F16, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16M = fmul <8 x half> %V8F16, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16M = fmul <16 x half> %V16F16, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %F16M = fmul half %F16, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16M = fmul <2 x half> %V2F16, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16M = fmul <4 x half> %V4F16, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:10 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16M = fmul <8 x half> %V8F16, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:20 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16M = fmul <16 x half> %V16F16, undef
 ; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 ; CHECK-FP16-LABEL: 'fmulneg_fp16'
@@ -338,16 +330,16 @@ define void @fnegfmul() {
 define void @fnegfmul_fp16() {
 ; CHECK-BASE-LABEL: 'fnegfmul_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16M = fmul half undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half %F16M
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16M = fmul <2 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> %V2F16M
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16M = fmul <4 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> %V4F16M
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16M = fmul <8 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> %V8F16M
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16M = fmul <16 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> %V16F16M
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %F16M = fmul half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half %F16M
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16M = fmul <2 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> %V2F16M
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16M = fmul <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> %V4F16M
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:10 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16M = fmul <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> %V8F16M
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:20 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16M = fmul <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> %V16F16M
 ; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 ; CHECK-FP16-LABEL: 'fnegfmul_fp16'
@@ -405,12 +397,19 @@ define void @fmul() {
 }
 define void @fmul_fp16() {
-; CHECK-LABEL: 'fmul_fp16'
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fmul half undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fmul <4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fmul <8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fmul <16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+; CHECK-BASE-LABEL: 'fmul_fp16'
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fmul half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fmul <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:10 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fmul <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:20 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fmul <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+; CHECK-FP16-LABEL: 'fmul_fp16'
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fmul half undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fmul <4 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fmul <8 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fmul <16 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 %F16 = fmul half undef, undef
 %V4F16 = fmul <4 x half> undef, undef
@@ -448,12 +447,19 @@ define void @fdiv() {
 }
 define void @fdiv_fp16() {
-; CHECK-LABEL: 'fdiv_fp16'
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = fdiv half undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = fdiv <4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = fdiv <8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of 4 for: %V16F16 = fdiv <16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+; CHECK-BASE-LABEL: 'fdiv_fp16'
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = fdiv half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = fdiv <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:10 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = fdiv <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:20 CodeSize:4 Lat:4 SizeLat:4 for: %V16F16 = fdiv <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+; CHECK-FP16-LABEL: 'fdiv_fp16'
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = fdiv half undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = fdiv <4 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = fdiv <8 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of 4 for: %V16F16 = fdiv <16 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 %F16 = fdiv half undef, undef
 %V4F16 = fdiv <4 x half> undef, undef
@@ -491,12 +497,19 @@ define void @frem() {
 }
 define void @frem_fp16() {
-; CHECK-LABEL: 'frem_fp16'
-; CHECK-NEXT: Cost Model: Found costs of RThru:10 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = frem half undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:52 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = frem <4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:108 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = frem <8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:216 CodeSize:4 Lat:4 SizeLat:4 for: %V16F16 = frem <16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+; CHECK-BASE-LABEL: 'frem_fp16'
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:13 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = frem half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:55 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = frem <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:110 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = frem <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:220 CodeSize:4 Lat:4 SizeLat:4 for: %V16F16 = frem <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+; CHECK-FP16-LABEL: 'frem_fp16'
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = frem half undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:52 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = frem <4 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:108 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = frem <8 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:216 CodeSize:4 Lat:4 SizeLat:4 for: %V16F16 = frem <16 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 %F16 = frem half undef, undef
 %V4F16 = frem <4 x half> undef, undef
@@ -729,9 +742,9 @@ define void @fmuladd() {
 define void @fmuladd_fp16() {
 ; CHECK-BASE-LABEL: 'fmuladd_fp16'
 ; CHECK-BASE-NEXT: Cost Model: Found costs of 1 for: %F16 = call half @llvm.fmuladd.f16(half undef, half undef, half undef)
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:6 SizeLat:2 for: %V4F16 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:6 SizeLat:2 for: %V8F16 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:2 Lat:6 SizeLat:2 for: %V16F16 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:9 CodeSize:2 Lat:6 SizeLat:2 for: %V4F16 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:18 CodeSize:2 Lat:6 SizeLat:2 for: %V8F16 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:36 CodeSize:2 Lat:6 SizeLat:2 for: %V16F16 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
 ; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 ; CHECK-FP16-LABEL: 'fmuladd_fp16'
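What the arith-fp.ll updates encode: without +fullfp16 (the CHECK-BASE profile), half arithmetic is legalized by promoting to float, so the base throughput costs now carry the conversion overhead and scale with vector width (e.g. fmul half RThru 2 -> 5, <16 x half> fmul 4 -> 20), while the CHECK-FP16 profile keeps the native-fp16 costs. A minimal sketch of how such a cost is queried (RUN line modeled on the histograms.ll test below; @fmul_half_cost and the loose CHECK pattern are illustrative):

; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64--linux-gnu | FileCheck %s
define half @fmul_half_cost(half %a, half %b) {
; CHECK: Cost Model: {{.*}} %r = fmul half %a, %b
  %r = fmul half %a, %b
  ret half %r
}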
diff --git a/llvm/test/Analysis/CostModel/AArch64/extract_float.ll b/llvm/test/Analysis/CostModel/AArch64/extract_float.ll
index d2b75faa..c214021 100644
--- a/llvm/test/Analysis/CostModel/AArch64/extract_float.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/extract_float.ll
@@ -11,6 +11,7 @@ define double @extract_case1(<2 x double> %a) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %1 = extractelement <2 x double> %a, i32 1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
 entry:
 %1 = extractelement <2 x double> %a, i32 0
 %2 = extractelement <2 x double> %a, i32 1
@@ -24,6 +25,7 @@ define double @extract_case2(<2 x double> %a) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %0 = extractelement <2 x double> %a, i32 1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %0
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
 entry:
 %1 = extractelement <2 x double> %a, i32 1
 %res = fmul double %1, %1
@@ -36,6 +38,7 @@ define double @extract_case3(<2 x double> %a) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %0 = extractelement <2 x double> %a, i32 0
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %0
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
 entry:
 %1 = extractelement <2 x double> %a, i32 0
 %res = fmul double %1, %1
@@ -48,6 +51,7 @@ define double @extract_case4(<2 x double> %a, double %b) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %0 = extractelement <2 x double> %a, i32 0
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %b
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
 entry:
 %1 = extractelement <2 x double> %a, i32 0
 %res = fmul double %1, %b
@@ -60,6 +64,7 @@ define double @extract_case5(<2 x double> %a, double %b) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %0 = extractelement <2 x double> %a, i32 1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %b
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
 entry:
 %1 = extractelement <2 x double> %a, i32 1
 %res = fmul double %1, %b
@@ -74,6 +79,7 @@ define double @extract_case6(<3 x double> %a) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %1 = extractelement <3 x double> %a, i32 1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
 entry:
 %1 = extractelement <3 x double> %a, i32 0
 %2 = extractelement <3 x double> %a, i32 1
@@ -90,6 +96,7 @@ define double @extract_case7(<4 x double> %a) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %1 = extractelement <4 x double> %a, i32 2
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
 entry:
 %1 = extractelement <4 x double> %a, i32 1
 %2 = extractelement <4 x double> %a, i32 2
@@ -108,6 +115,7 @@ define double @extract_case8(<2 x double> %a) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = fmul double %0, %1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = fmul double %3, %4
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %5
+;
 entry:
 %1 = extractelement <2 x double> %a, i32 0
 %2 = extractelement <2 x double> %a, i32 1
@@ -129,6 +137,7 @@ define double @extract_case9(<2 x double> %a) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = fmul double %0, %1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = fmul double %3, %4
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %5
+;
 entry:
 %1 = extractelement <2 x double> %a, i32 0
 %2 = extractelement <2 x double> %a, i32 1
@@ -148,6 +157,7 @@ define double @extract_case10(<4 x double> %a) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @foo(double %1)
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = fmul double %0, %1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %2
+;
 entry:
 %1 = extractelement <4 x double> %a, i32 0
 %2 = extractelement <4 x double> %a, i32 1
@@ -161,7 +171,7 @@ define half @extract_case11(<2 x half> %a) {
 ; NOFP16-LABEL: 'extract_case11'
 ; NOFP16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %0 = extractelement <2 x half> %a, i32 0
 ; NOFP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = extractelement <2 x half> %a, i32 1
-; NOFP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul half %0, %1
+; NOFP16-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %res = fmul half %0, %1
 ; NOFP16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %res
 ;
 ; FULLFP16-LABEL: 'extract_case11'
@@ -169,6 +179,7 @@ define half @extract_case11(<2 x half> %a) {
 ; FULLFP16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %1 = extractelement <2 x half> %a, i32 1
 ; FULLFP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul half %0, %1
 ; FULLFP16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %res
+;
 entry:
 %1 = extractelement <2 x half> %a, i32 0
 %2 = extractelement <2 x half> %a, i32 1
@@ -183,6 +194,7 @@ define float @extract_case12(<2 x float> %a) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %1 = extractelement <2 x float> %a, i32 1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul float %0, %1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %res
+;
 entry:
 %1 = extractelement <2 x float> %a, i32 0
 %2 = extractelement <2 x float> %a, i32 1
@@ -198,6 +210,7 @@ define double @extract_case13(<2 x double> %a) {
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = extractelement <2 x double> %a, i32 1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res = fadd double %0, %1
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
 entry:
 %1 = extractelement <2 x double> %a, i32 0
 %2 = extractelement <2 x double> %a, i32 1
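The only cost change in extract_float.ll is in extract_case11 (the remaining hunks just add a ';' separator after each autogenerated CHECK block): without full fp16 the scalar fmul half is now costed at 5, reflecting promotion to float, while the extract costs are untouched. Lane 0 extracts stay free and higher lanes cost 2. A sketch mirroring that case (@extract_then_mul is illustrative, not from the test):

define half @extract_then_mul(<2 x half> %a) {
  %e0 = extractelement <2 x half> %a, i32 0 ; cost 0: lane 0 extracts are free
  %e1 = extractelement <2 x half> %a, i32 1 ; cost 2
  %res = fmul half %e0, %e1                 ; cost 5 without +fullfp16, 2 with it
  ret half %res
}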
diff --git a/llvm/test/Analysis/CostModel/AArch64/histograms.ll b/llvm/test/Analysis/CostModel/AArch64/histograms.ll
new file mode 100644
index 0000000..c048958
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AArch64/histograms.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes="print<cost-model>" -cost-kind=throughput 2>&1 -disable-output -S -mtriple=aarch64--linux-gnu | FileCheck %s --check-prefix=CHECK-NEON
+; RUN: opt < %s -passes="print<cost-model>" -cost-kind=throughput 2>&1 -disable-output -S -mtriple=aarch64--linux-gnu -mattr=+sve | FileCheck %s --check-prefix=CHECK-SVE
+; RUN: opt < %s -passes="print<cost-model>" -cost-kind=throughput 2>&1 -disable-output -S -mtriple=aarch64--linux-gnu -mattr=+sve2 | FileCheck %s --check-prefix=CHECK-SVE2
+
+define void @histograms() {
+; CHECK-NEON-LABEL: 'histograms'
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 10 for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 20 for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 64 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 128 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umax.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umax.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umax.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umax.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umin.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umin.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umin.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umin.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; CHECK-SVE-LABEL: 'histograms'
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 128 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umax.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umax.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umax.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umax.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umin.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umin.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umin.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umin.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; CHECK-SVE2-LABEL: 'histograms'
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 32 for instruction: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 10 for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 20 for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 64 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 128 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umax.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umax.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umax.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umax.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umin.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umin.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umin.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umin.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+  call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+  call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+  call void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+  call void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+  call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+  call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+  call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+  call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+  call void @llvm.experimental.vector.histogram.uadd.sat.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+  call void @llvm.experimental.vector.histogram.uadd.sat.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+  call void @llvm.experimental.vector.histogram.uadd.sat.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+  call void @llvm.experimental.vector.histogram.uadd.sat.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+  call void @llvm.experimental.vector.histogram.uadd.sat.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+  call void @llvm.experimental.vector.histogram.uadd.sat.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+  call void @llvm.experimental.vector.histogram.uadd.sat.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+  call void @llvm.experimental.vector.histogram.uadd.sat.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umax.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umax.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umax.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umax.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umax.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umax.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umax.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umax.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umin.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umin.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umin.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umin.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umin.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umin.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umin.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+  call void @llvm.experimental.vector.histogram.umin.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+  ret void
+}
+
+declare void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr>, i64, <vscale x 2 x i1>)
+declare void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr>, i16, <vscale x 8 x i1>)
+declare void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr>, i8, <vscale x 16 x i1>)
+declare void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr>, i64, <2 x i1>)
+declare void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr>, i32, <4 x i1>)
+declare void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr>, i16, <8 x i1>)
+declare void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr>, i8, <16 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.nxv2p0.i64(<vscale x 2 x ptr>, i64, <vscale x 2 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.nxv4p0.i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.nxv8p0.i16(<vscale x 8 x ptr>, i16, <vscale x 8 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.nxv16p0.i8(<vscale x 16 x ptr>, i8, <vscale x 16 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.v2p0.i64(<2 x ptr>, i64, <2 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.v4p0.i32(<4 x ptr>, i32, <4 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.v8p0.i16(<8 x ptr>, i16, <8 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.v16p0.i8(<16 x ptr>, i8, <16 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.nxv2p0.i64(<vscale x 2 x ptr>, i64, <vscale x 2 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.nxv4p0.i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.nxv8p0.i16(<vscale x 8 x ptr>, i16, <vscale x 8 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.nxv16p0.i8(<vscale x 16 x ptr>, i8, <vscale x 16 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.v2p0.i64(<2 x ptr>, i64, <2 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.v4p0.i32(<4 x ptr>, i32, <4 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.v8p0.i16(<8 x ptr>, i16, <8 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.v16p0.i8(<16 x ptr>, i8, <16 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.nxv2p0.i64(<vscale x 2 x ptr>, i64, <vscale x 2 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.nxv4p0.i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.nxv8p0.i16(<vscale x 8 x ptr>, i16, <vscale x 8 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.nxv16p0.i8(<vscale x 16 x ptr>, i8, <vscale x 16 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.v2p0.i64(<2 x ptr>, i64, <2 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.v4p0.i32(<4 x ptr>, i32, <4 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.v8p0.i16(<8 x ptr>, i16, <8 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.v16p0.i8(<16 x ptr>, i8, <16 x i1>)
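histograms.ll is a new test covering the experimental vector histogram intrinsics (add, uadd.sat, umax, umin) for both fixed-width and scalable vectors; note that only SVE2 gives the scalable histogram.add forms a real cost, while every other scalable variant stays Invalid. A usage sketch of the add flavor (the function and operands are illustrative; the intrinsic signature is the one declared above):

; Increment the i64 bucket behind each active pointer by 1.
define void @bump_buckets(<2 x ptr> %buckets, <2 x i1> %mask) {
  call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask)
  ret void
}
declare void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr>, i64, <2 x i1>)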
diff --git a/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll b/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
index f565924..c4236d2 100644
--- a/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
@@ -26,10 +26,10 @@ define void @strict_fp_reductions() {
 define void @strict_fp_reductions_fp16() {
 ; CHECK-NOFP16-LABEL: 'strict_fp_reductions_fp16'
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:10 SizeLat:6 for: %fadd_v2f16 = call half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:18 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f16 = call half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:23 Lat:46 SizeLat:30 for: %fadd_v8f16 = call half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:76 CodeSize:46 Lat:92 SizeLat:60 for: %fadd_v16f16 = call half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:12 CodeSize:5 Lat:10 SizeLat:6 for: %fadd_v2f16 = call half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:26 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f16 = call half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:54 CodeSize:23 Lat:46 SizeLat:30 for: %fadd_v8f16 = call half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:108 CodeSize:46 Lat:92 SizeLat:60 for: %fadd_v16f16 = call half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
 ; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 ; CHECK-F16-LABEL: 'strict_fp_reductions_fp16'
@@ -40,10 +40,10 @@ define void @strict_fp_reductions_fp16() {
 ; CHECK-F16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 ; CHECK-BF16-LABEL: 'strict_fp_reductions_fp16'
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:10 SizeLat:6 for: %fadd_v2f16 = call half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:18 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f16 = call half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:23 Lat:46 SizeLat:30 for: %fadd_v8f16 = call half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:76 CodeSize:46 Lat:92 SizeLat:60 for: %fadd_v16f16 = call half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:12 CodeSize:5 Lat:10 SizeLat:6 for: %fadd_v2f16 = call half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:26 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f16 = call half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:54 CodeSize:23 Lat:46 SizeLat:30 for: %fadd_v8f16 = call half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:108 CodeSize:46 Lat:92 SizeLat:60 for: %fadd_v16f16 = call half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
 ; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 %fadd_v2f16 = call half @llvm.vector.reduce.fadd.v2f16(half 0.0, <2 x half> undef)
@@ -55,15 +55,15 @@ define void @strict_fp_reductions_fp16() {
 define void @strict_fp_reductions_bf16() {
 ; CHECK-NOFP16-LABEL: 'strict_fp_reductions_bf16'
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:18 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:54 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
 ; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 ; CHECK-F16-LABEL: 'strict_fp_reductions_bf16'
-; CHECK-F16-NEXT: Cost Model: Found costs of RThru:18 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
+; CHECK-F16-NEXT: Cost Model: Found costs of RThru:54 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
 ; CHECK-F16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 ; CHECK-BF16-LABEL: 'strict_fp_reductions_bf16'
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:26 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
 ; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
 ;
 %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4f8(bfloat 0.0, <4 x bfloat> undef)
@@ -117,16 +117,16 @@ define void @fast_fp_reductions() {
 define void @fast_fp_reductions_fp16() {
 ; CHECK-NOFP16-LABEL: 'fast_fp_reductions_fp16'
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:30 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:30 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16 = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v13f16_reassoc = call reassoc half
@llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef) +; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:7 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef) +; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:7 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef) +; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef) +; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef) +; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef) +; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef) +; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:72 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16 = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef) +; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:72 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef) +; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:203 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef) +; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:248 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef) ; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; CHECK-F16-LABEL: 'fast_fp_reductions_fp16' @@ -143,16 +143,16 @@ define void @fast_fp_reductions_fp16() { ; CHECK-F16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; CHECK-BF16-LABEL: 'fast_fp_reductions_fp16' -; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef) -; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef) -; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef) -; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef) -; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:30 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef) -; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:30 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef) -; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:44 Lat:52 SizeLat:44 for: 
%fadd_v16f16 = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef) -; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef) -; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef) -; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef) +; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:7 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef) +; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:7 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef) +; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef) +; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef) +; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef) +; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef) +; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:72 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16 = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef) +; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:72 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef) +; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:203 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef) +; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:248 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef) ; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; %fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0.0, <2 x half> undef) @@ -175,15 +175,15 @@ define void @fast_fp_reductions_fp16() { define void @fast_fp_reductions_bf16() { ; CHECK-NOFP16-LABEL: 'fast_fp_reductions_bf16' -; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef) +; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:28 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef) ; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; CHECK-F16-LABEL: 'fast_fp_reductions_bf16' -; CHECK-F16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call 
reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef) +; CHECK-F16-NEXT: Cost Model: Found costs of RThru:28 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef) ; CHECK-F16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; CHECK-BF16-LABEL: 'fast_fp_reductions_bf16' -; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:8 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef) +; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef) ; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4f8(bfloat -0.0, <4 x bfloat> undef) diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll b/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll index dc95eac..f7ebd40 100644 --- a/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll +++ b/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll @@ -164,3 +164,55 @@ define void @frem() { ret void } + +define void @fma() { +; CHECK-LABEL: 'fma' +; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef) +; CHECK-NEXT: Cost Model: Found costs of 2 for: %V8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef) +; CHECK-NEXT: Cost Model: Found costs of 4 for: %V16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef) +; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F32 = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef) +; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F32 = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef) +; CHECK-NEXT: Cost Model: Found costs of 4 for: %V8F32 = call <vscale x 8 x float> @llvm.fma.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef) +; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F64 = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef) +; CHECK-NEXT: Cost Model: Found costs of 4 for: %V4F64 = call <vscale x 4 x double> @llvm.fma.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef) +; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void +; + %V4F16 = call <vscale x 4 x half> @llvm.fma.v4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef) + %V8F16 = call <vscale x 8 x half> @llvm.fma.v8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef) + %V16F16 = call <vscale x 16 x half> @llvm.fma.v16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef) + + %V2F32 = call <vscale x 2 x float> @llvm.fma.v2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef) + %V4F32 = call <vscale x 4 x float> @llvm.fma.v4f32(<vscale x 4 x 
float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef) + %V8F32 = call <vscale x 8 x float> @llvm.fma.v8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef) + + %V2F64 = call <vscale x 2 x double> @llvm.fma.v2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef) + %V4F64 = call <vscale x 4 x double> @llvm.fma.v4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef) + + ret void +} + +define void @fmuladd() { +; CHECK-LABEL: 'fmuladd' +; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F16 = call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef) +; CHECK-NEXT: Cost Model: Found costs of 2 for: %V8F16 = call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef) +; CHECK-NEXT: Cost Model: Found costs of 4 for: %V16F16 = call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef) +; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F32 = call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef) +; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F32 = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef) +; CHECK-NEXT: Cost Model: Found costs of 4 for: %V8F32 = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef) +; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F64 = call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef) +; CHECK-NEXT: Cost Model: Found costs of 4 for: %V4F64 = call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef) +; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void +; + %V4F16 = call <vscale x 4 x half> @llvm.fmuladd.v4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef) + %V8F16 = call <vscale x 8 x half> @llvm.fmuladd.v8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef) + %V16F16 = call <vscale x 16 x half> @llvm.fmuladd.v16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef) + + %V2F32 = call <vscale x 2 x float> @llvm.fmuladd.v2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef) + %V4F32 = call <vscale x 4 x float> @llvm.fmuladd.v4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef) + %V8F32 = call <vscale x 8 x float> @llvm.fmuladd.v8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef) + + %V2F64 = call <vscale x 2 x double> @llvm.fmuladd.v2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef) + %V4F64 = call <vscale x 4 x double> @llvm.fmuladd.v4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef) + + ret void +} diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll index 7e8d957..609a23b 100644 --- a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll +++ 
b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll @@ -1277,15 +1277,15 @@ define void @histogram_nxv16i8(<vscale x 16 x ptr> %buckets, <vscale x 16 x i1> define void @histogram_v2i64(<2 x ptr> %buckets, <2 x i1> %mask) { ; CHECK-VSCALE-1-LABEL: 'histogram_v2i64' -; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:10 SizeLat:10 for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask) ; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; CHECK-VSCALE-2-LABEL: 'histogram_v2i64' -; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:10 SizeLat:10 for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask) ; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_v2i64' -; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask) +; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:10 SizeLat:10 for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask) ; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask) @@ -1294,15 +1294,15 @@ define void @histogram_v2i64(<2 x ptr> %buckets, <2 x i1> %mask) { define void @histogram_v4i32(<4 x ptr> %buckets, <4 x i1> %mask) { ; CHECK-VSCALE-1-LABEL: 'histogram_v4i32' -; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:20 CodeSize:16 Lat:20 SizeLat:20 for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask) ; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; CHECK-VSCALE-2-LABEL: 'histogram_v4i32' -; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:20 CodeSize:16 Lat:20 SizeLat:20 for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask) ; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_v4i32' -; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask) +; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:20 CodeSize:16 Lat:20 SizeLat:20 for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask) ; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; call void 
@llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask) @@ -1311,15 +1311,15 @@ define void @histogram_v4i32(<4 x ptr> %buckets, <4 x i1> %mask) { define void @histogram_v8i16(<8 x ptr> %buckets, <8 x i1> %mask) { ; CHECK-VSCALE-1-LABEL: 'histogram_v8i16' -; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:40 CodeSize:32 Lat:40 SizeLat:40 for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask) ; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; CHECK-VSCALE-2-LABEL: 'histogram_v8i16' -; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:40 CodeSize:32 Lat:40 SizeLat:40 for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask) ; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_v8i16' -; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask) +; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:40 CodeSize:32 Lat:40 SizeLat:40 for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask) ; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask) @@ -1328,15 +1328,15 @@ define void @histogram_v8i16(<8 x ptr> %buckets, <8 x i1> %mask) { define void @histogram_v16i8(<16 x ptr> %buckets, <16 x i1> %mask) { ; CHECK-VSCALE-1-LABEL: 'histogram_v16i8' -; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:80 CodeSize:64 Lat:80 SizeLat:80 for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask) ; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; CHECK-VSCALE-2-LABEL: 'histogram_v16i8' -; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:80 CodeSize:64 Lat:80 SizeLat:80 for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask) ; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_v16i8' -; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask) +; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:80 CodeSize:64 Lat:80 SizeLat:80 for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask) ; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret 
void ; call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask) diff --git a/llvm/test/Analysis/CostModel/AArch64/vec3-ops.ll b/llvm/test/Analysis/CostModel/AArch64/vec3-ops.ll index 6bcf3c7..f234341 100644 --- a/llvm/test/Analysis/CostModel/AArch64/vec3-ops.ll +++ b/llvm/test/Analysis/CostModel/AArch64/vec3-ops.ll @@ -206,8 +206,8 @@ define void @vec3_float(<3 x float> %a, <3 x float> %b, ptr %src, ptr %dst) { define void @vec3_half(<3 x half> %a, <3 x half> %b, ptr %src, ptr %dst) { ; CHECK-LABEL: 'vec3_half' ; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:4 SizeLat:1 for: %l = load <3 x half>, ptr %src, align 1 -; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %add = fadd <3 x half> %l, %b -; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %sub = fsub <3 x half> %add, %a +; CHECK-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %add = fadd <3 x half> %l, %b +; CHECK-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %sub = fsub <3 x half> %add, %a ; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:1 SizeLat:1 for: store <3 x half> %sub, ptr %dst, align 1 ; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void ; diff --git a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll index ecc7fc8..245e8f7 100644 --- a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll +++ b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll @@ -23,7 +23,7 @@ %"class.llvm::Metadata.306.1758.9986.10470.10954.11438.11922.12406.12890.13374.13858.15310.15794.16278.17730.19182.21118.25958.26926.29346.29830.30314.30798.31282.31766.32250.32734.33702.36606.38058.41638" = type { i8, i8, i16, i32 } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end(i64, ptr nocapture) #0 +declare void @llvm.lifetime.end(ptr nocapture) #0 ; Function Attrs: nounwind ssp uwtable define hidden void @fun(ptr %N, i1 %arg) #1 align 2 { @@ -42,7 +42,6 @@ for.cond.cleanup: ; preds = %for.body, %entry for.body: ; preds = %for.body, %for.body.lr.ph %indvars.iv190 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next191, %for.body ] - call void @llvm.lifetime.end(i64 16, ptr nonnull null) %indvars.iv.next191 = add nuw nsw i64 %indvars.iv190, 1 %exitcond193 = icmp eq i64 %indvars.iv.next191, %wide.trip.count192 br i1 %exitcond193, label %for.cond.cleanup, label %for.body diff --git a/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll b/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll index 3a54428..cef960d 100644 --- a/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll +++ b/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll @@ -14,8 +14,8 @@ define i32 @trivially_free() { ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef) -; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca) -; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca) +;
CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca) +; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) @@ -32,8 +32,8 @@ define i32 @trivially_free() { ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef) -; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca) -; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca) +; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca) +; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) @@ -49,8 +49,8 @@ define i32 @trivially_free() { %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef) %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef) %a4 = call i1 @llvm.is.constant.i32(i32 undef) - call void @llvm.lifetime.start.p0(i64 1, ptr %alloca) - call void @llvm.lifetime.end.p0(i64 1, ptr %alloca) + call void @llvm.lifetime.start.p0(ptr %alloca) + call void @llvm.lifetime.end.p0(ptr %alloca) %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 1, i1 1, i1 1) %a6 = call ptr @llvm.ptr.annotation.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) call void @llvm.var.annotation(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) @@ -66,8 +66,8 @@ declare void @llvm.invariant.end.p0(ptr, i64, ptr) declare ptr @llvm.launder.invariant.group.p0(ptr) declare ptr @llvm.strip.invariant.group.p0(ptr) declare i1 @llvm.is.constant.i32(i32) -declare void @llvm.lifetime.start.p0(i64, ptr) -declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.start.p0(ptr) +declare void @llvm.lifetime.end.p0(ptr) declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1) declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr) declare void @llvm.var.annotation(ptr, ptr, ptr, i32, ptr) diff --git a/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll 
b/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll index 96064dc..2acc8e8 100644 --- a/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll +++ b/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll @@ -16,8 +16,8 @@ define i32 @trivially_free() { ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef) -; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca) -; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca) +; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca) +; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a7 = call i1 @llvm.allow.ubsan.check(i8 123) @@ -36,8 +36,8 @@ define i32 @trivially_free() { ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef) -; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca) -; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca) +; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca) +; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a7 = call i1 @llvm.allow.ubsan.check(i8 123) @@ -55,8 +55,8 @@ define i32 @trivially_free() { %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef) %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef) %a4 = call i1 @llvm.is.constant.i32(i32 undef) - call void @llvm.lifetime.start.p0(i64 1, ptr %alloca) - call void @llvm.lifetime.end.p0(i64 1, ptr %alloca) + call void @llvm.lifetime.start.p0(ptr %alloca) + call void @llvm.lifetime.end.p0(ptr %alloca) %a5 = call i64 @llvm.objectsize.i64.p0(ptr 
undef, i1 1, i1 1, i1 1) %a6 = call ptr @llvm.ptr.annotation.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) %a7 = call i1 @llvm.allow.ubsan.check(i8 123) @@ -74,8 +74,8 @@ declare void @llvm.invariant.end.p0(ptr, i64, ptr) declare ptr @llvm.launder.invariant.group.p0(ptr) declare ptr @llvm.strip.invariant.group.p0(ptr) declare i1 @llvm.is.constant.i32(i32) -declare void @llvm.lifetime.start.p0(i64, ptr) -declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.start.p0(ptr) +declare void @llvm.lifetime.end.p0(ptr) declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1) declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr) declare void @llvm.var.annotation(ptr, ptr, ptr, i32, ptr) diff --git a/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll b/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll index f989ebe..7f002d0 100644 --- a/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll +++ b/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll @@ -14,8 +14,8 @@ define i32 @trivially_free() { ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef) -; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca) -; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca) +; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca) +; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) ; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) @@ -34,8 +34,8 @@ define i32 @trivially_free() { ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef) -; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca) -; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca) +; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca) +; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 
@llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) ; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) @@ -53,8 +53,8 @@ define i32 @trivially_free() { %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef) %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef) %a4 = call i1 @llvm.is.constant.i32(i32 undef) - call void @llvm.lifetime.start.p0(i64 1, ptr %alloca) - call void @llvm.lifetime.end.p0(i64 1, ptr %alloca) + call void @llvm.lifetime.start.p0(ptr %alloca) + call void @llvm.lifetime.end.p0(ptr %alloca) %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 1, i1 1, i1 1) %a6 = call ptr @llvm.ptr.annotation.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) call void @llvm.var.annotation(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef) @@ -72,8 +72,8 @@ declare void @llvm.invariant.end.p0(ptr, i64, ptr) declare ptr @llvm.launder.invariant.group.p0(ptr) declare ptr @llvm.strip.invariant.group.p0(ptr) declare i1 @llvm.is.constant.i32(i32) -declare void @llvm.lifetime.start.p0(i64, ptr) -declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.start.p0(ptr) +declare void @llvm.lifetime.end.p0(ptr) declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1) declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr) declare void @llvm.var.annotation(ptr, ptr, ptr, i32, ptr) diff --git a/llvm/test/Analysis/Delinearization/fixed_size_array.ll b/llvm/test/Analysis/Delinearization/fixed_size_array.ll new file mode 100644 index 0000000..0512044 --- /dev/null +++ b/llvm/test/Analysis/Delinearization/fixed_size_array.ll @@ -0,0 +1,499 @@ +; RUN: opt < %s -passes='print<delinearization>' -disable-output -delinearize-use-fixed-size-array-heuristic 2>&1 | FileCheck %s + +; void f(int A[][8][32]) { +; for (i = 0; i < 42; i++) +; for (j = 0; j < 8; j++) +; for (k = 0; k < 32; k++) +; A[i][j][k] = 1; +; } + +; CHECK: Delinearization on function a_i_j_k: +; CHECK: Base offset: %a +; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes. +; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>] +define void @a_i_j_k(ptr %a) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + br label %for.j.header + +for.j.header: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ] + br label %for.k + +for.k: + %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ] + %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j, i32 %k + store i32 1, ptr %idx + %k.inc = add i32 %k, 1 + %cmp.k = icmp slt i32 %k.inc, 32 + br i1 %cmp.k, label %for.k, label %for.j.latch + +for.j.latch: + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 8 + br i1 %cmp.j, label %for.j.header, label %for.i.latch + +for.i.latch: + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 42 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; void f(int A[][8][32]) { +; for (i = 0; i < 42; i++) +; for (j = 0; j < 8; j++) +; for (k = 0; k < 32; k++) +; A[i][7-j][k] = 1; +; } + +; CHECK: Delinearization on function a_i_nj_k: +; CHECK: Base offset: %a +; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes. 
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{7,+,-1}<nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>] +define void @a_i_nj_k(ptr %a) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + br label %for.j.header + +for.j.header: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ] + %j.subscript = sub i32 7, %j + br label %for.k + +for.k: + %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ] + %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j.subscript, i32 %k + store i32 1, ptr %idx + %k.inc = add i32 %k, 1 + %cmp.k = icmp slt i32 %k.inc, 32 + br i1 %cmp.k, label %for.k, label %for.j.latch + +for.j.latch: + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 8 + br i1 %cmp.j, label %for.j.header, label %for.i.latch + +for.i.latch: + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 42 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; In the following code, the access functions for both stores are represented +; in the same way in SCEV, so the delinearization results are also the same. We +; don't have any type information about the underlying objects. +; +; void f(int A[][4][64], int B[][8][32]) { +; for (i = 0; i < 42; i++) +; for (j = 0; j < 4; j++) +; for (k = 0; k < 32; k++) { +; A[i][j][k] = 1; +; B[i][2*j][k] = 1; +; } +; } + +; CHECK: Delinearization on function a_ijk_b_i2jk: +; CHECK: Base offset: %a +; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes. +; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>] +; CHECK: Base offset: %b +; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes. +; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>] +define void @a_ijk_b_i2jk(ptr %a, ptr %b) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + br label %for.j.header + +for.j.header: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ] + %j2 = shl i32 %j, 1 + br label %for.k + +for.k: + %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ] + %a.idx = getelementptr [4 x [64 x i32]], ptr %a, i32 %i, i32 %j, i32 %k + %b.idx = getelementptr [8 x [32 x i32]], ptr %b, i32 %i, i32 %j2, i32 %k + store i32 1, ptr %a.idx + store i32 1, ptr %b.idx + %k.inc = add i32 %k, 1 + %cmp.k = icmp slt i32 %k.inc, 32 + br i1 %cmp.k, label %for.k, label %for.j.latch + +for.j.latch: + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 4 + br i1 %cmp.j, label %for.j.header, label %for.i.latch + +for.i.latch: + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 42 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; The type information for the underlying object is not available, so the +; delinearization result is different from the original array size. In this +; case, the underlying object is of type int[][8][32], but the +; delinearization result looks like int[][4][64]. +; +; void f(int A[][8][32]) { +; for (i = 0; i < 42; i++) +; for (j = 0; j < 3; j++) +; for (k = 0; k < 32; k++) +; A[i][2*j+1][k] = 1; +; } + +; CHECK: Delinearization on function a_i_2j1_k: +; CHECK: Base offset: %a +; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes.
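+; (For reference: the byte offset here is i*1024 + (2*j+1)*128 + k*4 = i*1024 + j*256 + k*4 + 128, so the recovered strides are 1024, 256 and 4 bytes, i.e. a [?][4][64] shape of i32, with the constant 128 bytes reappearing below as a k offset of 32 elements.)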
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><%for.j.header>][{32,+,1}<nw><%for.k>] +define void @a_i_2j1_k(ptr %a) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + br label %for.j.header + +for.j.header: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ] + %j2 = shl i32 %j, 1 + %j.subscript = add i32 %j2, 1 + br label %for.k + +for.k: + %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ] + %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j.subscript, i32 %k + store i32 1, ptr %idx + %k.inc = add i32 %k, 1 + %cmp.k = icmp slt i32 %k.inc, 32 + br i1 %cmp.k, label %for.k, label %for.j.latch + +for.j.latch: + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 3 + br i1 %cmp.j, label %for.j.header, label %for.i.latch + +for.i.latch: + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 42 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Fail to delinearize because the step recurrence value of the i-loop is not +; divisible by that of the j-loop. +; +; void f(int A[][8][32]) { +; for (i = 0; i < 42; i++) +; for (j = 0; j < 2; j++) +; for (k = 0; k < 42; k++) +; A[i][3*j][k] = 1; +; } + +; CHECK: Delinearization on function a_i_3j_k: +; CHECK: AccessFunction: {{...}}0,+,1024}<nuw><nsw><%for.i.header>,+,384}<nw><%for.j.header>,+,4}<nw><%for.k> +; CHECK-NEXT: failed to delinearize +define void @a_i_3j_k(ptr %a) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + br label %for.j.header + +for.j.header: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ] + %j.subscript = mul i32 %j, 3 + br label %for.k + +for.k: + %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ] + %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j.subscript, i32 %k + store i32 1, ptr %idx + %k.inc = add i32 %k, 1 + %cmp.k = icmp slt i32 %k.inc, 42 + br i1 %cmp.k, label %for.k, label %for.j.latch + +for.j.latch: + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j.header, label %for.i.latch + +for.i.latch: + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 42 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Although the step recurrence value of the j-loop is not divisible by that of the +; k-loop, delinearization is possible because we know that the "actual" stride +; width for the last dimension is 4 instead of 12. +; +; void f(int A[][8][32]) { +; for (i = 0; i < 42; i++) +; for (j = 0; j < 8; j++) +; for (k = 0; k < 10; k++) +; A[i][j][3*k] = 1; +; } + +; CHECK: Delinearization on function a_i_j_3k: +; CHECK: Base offset: %a +; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes.
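+; (For reference: the byte offset is i*1024 + j*128 + k*12; the element size 4 divides the k stride 12, so the factor of 3 is absorbed into the k subscript, which appears below as {0,+,3}.)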
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,3}<nuw><nsw><%for.k>] +define void @a_i_j_3k(ptr %a) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + br label %for.j.header + +for.j.header: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ] + br label %for.k + +for.k: + %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ] + %k.subscript = mul i32 %k, 3 + %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j, i32 %k.subscript + store i32 1, ptr %idx + %k.inc = add i32 %k, 1 + %cmp.k = icmp slt i32 %k.inc, 10 + br i1 %cmp.k, label %for.k, label %for.j.latch + +for.j.latch: + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 8 + br i1 %cmp.j, label %for.j.header, label %for.i.latch + +for.i.latch: + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 42 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Fail to delinearize because i is used in multiple subscripts that are not adjacent. +; +; void f(int A[][8][32]) { +; for (i = 0; i < 32; i++) +; for (j = 0; j < 2; j++) +; for (k = 0; k < 4; k++) +; A[i][2*j+k][i] = 1; +; } + +; CHECK: Delinearization on function a_i_j2k_i: +; CHECK: AccessFunction: {{...}}0,+,1028}<%for.i.header>,+,256}<nw><%for.j.header>,+,128}<nw><%for.k> +; CHECK-NEXT: failed to delinearize +define void @a_i_j2k_i(ptr %a) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + br label %for.j.header + +for.j.header: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ] + br label %for.k + +for.k: + %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ] + %j2 = shl i32 %j, 1 + %j2.k = add i32 %j2, %k + %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j2.k, i32 %i + store i32 1, ptr %idx + %k.inc = add i32 %k, 1 + %cmp.k = icmp slt i32 %k.inc, 4 + br i1 %cmp.k, label %for.k, label %for.j.latch + +for.j.latch: + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j.header, label %for.i.latch + +for.i.latch: + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 32 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Can delinearize, but the result is different from the original array size. In +; this case, the outermost two dimensions are melded into one. +; +; void f(int A[][8][32]) { +; for (i = 0; i < 8; i++) +; for (j = 0; j < 10; j++) +; for (k = 0; k < 10; k++) +; A[i][i][j+k] = 1; +; } + +; CHECK: Delinearization on function a_i_i_jk: +; CHECK: Base offset: %a +; CHECK-NEXT: ArrayDecl[UnknownSize][288] with elements of 4 bytes. 
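+; (For reference: the byte offset is i*1024 + i*128 + (j+k)*4 = i*1152 + (j+k)*4, and 1152 bytes is 288 i32 elements, hence the single merged [288] dimension.)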
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{{..}}0,+,1}<nuw><nsw><%for.j.header>,+,1}<nuw><nsw><%for.k>] +define void @a_i_i_jk(ptr %a) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + br label %for.j.header + +for.j.header: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ] + br label %for.k + +for.k: + %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ] + %jk = add i32 %j, %k + %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %i, i32 %jk + store i32 1, ptr %idx + %k.inc = add i32 %k, 1 + %cmp.k = icmp slt i32 %k.inc, 10 + br i1 %cmp.k, label %for.k, label %for.j.latch + +for.j.latch: + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 10 + br i1 %cmp.j, label %for.j.header, label %for.i.latch + +for.i.latch: + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 8 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; void f(int A[][8][32]) { +; for (i = 0; i < 8; i++) +; for (j = 0; j < 4; j++) +; for (k = 0; k < 4; k++) +; for (l = 0; l < 32; l++) +; A[i][j+k][l] = 1; +; } + +; CHECK: Delinearization on function a_i_jk_l: +; CHECK: Base offset: %a +; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes. +; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{{..}}0,+,1}<nuw><nsw><%for.j.header>,+,1}<nuw><nsw><%for.k.header>][{0,+,1}<nuw><nsw><%for.l>] + +define void @a_i_jk_l(ptr %a) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + br label %for.j.header + +for.j.header: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ] + br label %for.k.header + +for.k.header: + %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k.latch ] + %jk = add i32 %j, %k + br label %for.l + +for.l: + %l = phi i32 [ 0, %for.k.header ], [ %l.inc, %for.l ] + %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %jk, i32 %l + store i32 1, ptr %idx + %l.inc = add i32 %l, 1 + %cmp.l = icmp slt i32 %l.inc, 32 + br i1 %cmp.l, label %for.l, label %for.k.latch + +for.k.latch: + %k.inc = add i32 %k, 1 + %cmp.k = icmp slt i32 %k.inc, 4 + br i1 %cmp.k, label %for.k.header, label %for.j.latch + +for.j.latch: + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 4 + br i1 %cmp.j, label %for.j.header, label %for.i.latch + +for.i.latch: + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 8 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Reject if the address is not a multiple of the element size. 
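+; Here the byte offset is i*256 + j*32 + k while the stored element is 4 bytes wide; the innermost stride of 1 byte is not a multiple of the element size, so no integer subscripts can describe the access.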
+; +; void f(int *A) { +; for (i = 0; i < 42; i++) +; for (j = 0; j < 8; j++) +; for (k = 0; k < 32; k++) +; *((int *)((char *)A + i*256 + j*32 + k)) = 1; +; } + +; CHECK: Delinearization on function non_divisible_by_element_size: +; CHECK: AccessFunction: {{...}}0,+,256}<nuw><nsw><%for.i.header>,+,32}<nw><%for.j.header>,+,1}<nw><%for.k> +; CHECK-NEXT: failed to delinearize +define void @non_divisible_by_element_size(ptr %a) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + br label %for.j.header + +for.j.header: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ] + br label %for.k + +for.k: + %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ] + %idx = getelementptr [8 x [32 x i8]], ptr %a, i32 %i, i32 %j, i32 %k + store i32 1, ptr %idx + %k.inc = add i32 %k, 1 + %cmp.k = icmp slt i32 %k.inc, 32 + br i1 %cmp.k, label %for.k, label %for.j.latch + +for.j.latch: + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 8 + br i1 %cmp.j, label %for.j.header, label %for.i.latch + +for.i.latch: + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 42 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} diff --git a/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json b/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json index 9b38f2e..07fde84 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json +++ b/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json @@ -47,6 +47,7 @@ "FPTrunc": [89, 90], "FPExt": [91, 92], "PtrToInt": [93, 94], + "PtrToAddr": [135, 136], "IntToPtr": [95, 96], "BitCast": [97, 98], "AddrSpaceCast": [99, 100], diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt index 79fcf82..1b9b3c2 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt +++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt @@ -45,6 +45,7 @@ Key: SIToFP: [ 87.00 88.00 ] Key: FPTrunc: [ 89.00 90.00 ] Key: FPExt: [ 91.00 92.00 ] Key: PtrToInt: [ 93.00 94.00 ] +Key: PtrToAddr: [ 135.00 136.00 ] Key: IntToPtr: [ 95.00 96.00 ] Key: BitCast: [ 97.00 98.00 ] Key: AddrSpaceCast: [ 99.00 100.00 ] diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt index 584bd31..9673e7f 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt +++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt @@ -45,6 +45,7 @@ Key: SIToFP: [ 43.50 44.00 ] Key: FPTrunc: [ 44.50 45.00 ] Key: FPExt: [ 45.50 46.00 ] Key: PtrToInt: [ 46.50 47.00 ] +Key: PtrToAddr: [ 67.50 68.00 ] Key: IntToPtr: [ 47.50 48.00 ] Key: BitCast: [ 48.50 49.00 ] Key: AddrSpaceCast: [ 49.50 50.00 ] diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt index 2727c85..1f575d2 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt +++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt @@ -45,6 +45,7 @@ Key: SIToFP: [ 8.70 8.80 ] Key: FPTrunc: [ 8.90 9.00 ] Key: FPExt: [ 9.10 9.20 ] Key: PtrToInt: [ 9.30 9.40 ] +Key: PtrToAddr: [ 13.50 13.60 ] Key: IntToPtr: [ 9.50 9.60 ] Key: BitCast: [ 9.70 9.80 ] Key: AddrSpaceCast: [ 9.90 10.00 ] diff --git a/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll b/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll index bd46741..da5a898 100644 --- 
a/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll +++ b/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll @@ -417,7 +417,7 @@ define internal noundef range(i32 -1, 1024) i32 @__kmpc_target_init(ptr nofree n br label %116 116: ; preds = %110, %128 - call void @llvm.lifetime.start.p0(i64 noundef 8, ptr noundef nonnull align 8 dereferenceable(8) %3) #20 + call void @llvm.lifetime.start.p0(ptr noundef nonnull align 8 dereferenceable(8) %3) #20 tail call void @llvm.nvvm.barrier.sync(i32 noundef 8) %117 = call zeroext i1 @__kmpc_kernel_parallel(ptr noalias nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) %3) #20 %118 = load ptr, ptr %3, align 8, !tbaa !93 @@ -446,11 +446,11 @@ define internal noundef range(i32 -1, 1024) i32 @__kmpc_target_init(ptr nofree n 128: ; preds = %126, %120 tail call void @llvm.nvvm.barrier.sync(i32 noundef 8) - call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %3) #20 + call void @llvm.lifetime.end.p0(ptr noundef nonnull %3) #20 br label %116, !llvm.loop !94 129: ; preds = %116 - call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %3) #20 + call void @llvm.lifetime.end.p0(ptr noundef nonnull %3) #20 br label %130 130: ; preds = %106, %129, %100, %98 @@ -495,7 +495,7 @@ define internal fastcc void @__assert_fail_internal(ptr noundef nonnull derefere declare void @llvm.assume(i1 noundef) #9 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #10 +declare void @llvm.lifetime.start.p0(ptr nocapture) #10 ; Function Attrs: convergent nocallback nounwind declare void @llvm.nvvm.barrier.sync(i32) #11 @@ -587,7 +587,7 @@ define internal void @__kmpc_kernel_end_parallel() local_unnamed_addr #13 { } ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #10 +declare void @llvm.lifetime.end.p0(ptr nocapture) #10 ; Function Attrs: convergent mustprogress nounwind willreturn allockind("free") memory(argmem: readwrite, inaccessiblemem: readwrite) declare extern_weak void @free(ptr allocptr nocapture noundef) local_unnamed_addr #14 @@ -595,11 +595,11 @@ declare extern_weak void @free(ptr allocptr nocapture noundef) local_unnamed_add ; Function Attrs: convergent mustprogress nounwind define internal noundef i32 @_ZN4ompx6printfEPKcz(ptr noundef %0, ...) 
local_unnamed_addr #15 { %2 = alloca ptr, align 8 - call void @llvm.lifetime.start.p0(i64 noundef 8, ptr noundef nonnull align 8 %2) #29 + call void @llvm.lifetime.start.p0(ptr noundef nonnull align 8 %2) #29 call void @llvm.va_start.p0(ptr noundef nonnull align 8 %2) #27 %3 = load ptr, ptr %2, align 8, !tbaa !101 %4 = call i32 @vprintf(ptr noundef %0, ptr noundef %3) #24 - call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %2) #20 + call void @llvm.lifetime.end.p0(ptr noundef nonnull %2) #20 ret i32 %4 } @@ -641,7 +641,7 @@ define internal void @__kmpc_target_deinit() #4 { br i1 %14, label %15, label %27 15: ; preds = %11 - call void @llvm.lifetime.start.p0(i64 noundef 8, ptr noundef nonnull align 8 dereferenceable(8) %1) #29 + call void @llvm.lifetime.start.p0(ptr noundef nonnull align 8 dereferenceable(8) %1) #29 %16 = call zeroext i1 @__kmpc_kernel_parallel(ptr noalias nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) %1) #20 %17 = load i32, ptr @__omp_rtl_debug_kind, align 4, !tbaa !62 %18 = load i32, ptr addrspace(4) @__omp_rtl_device_environment, align 8, !tbaa !83 @@ -659,7 +659,7 @@ define internal void @__kmpc_target_deinit() #4 { 26: ; preds = %15 tail call void @llvm.assume(i1 noundef %23) #23 - call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %1) #20 + call void @llvm.lifetime.end.p0(ptr noundef nonnull %1) #20 br label %27 27: ; preds = %26, %11, %10, %0 diff --git a/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll b/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll index 71ea5d2..0ad1a33 100644 --- a/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll +++ b/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll @@ -17,13 +17,13 @@ target triple = "x86_64-unknown-linux-gnu" @.str = private unnamed_addr constant [8 x i8] c"a = %l\0A\00", align 1 -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @hoo(ptr) declare i32 @printf(ptr nocapture readonly, ...) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define void @goo(i32 %N, ptr %b) { entry: @@ -38,12 +38,12 @@ for.cond: ; preds = %for.body, %entry br i1 %cmp, label %for.body, label %for.end for.body: ; preds = %for.cond - call void @llvm.lifetime.start.p0(i64 8, ptr %tmp) + call void @llvm.lifetime.start.p0(ptr %tmp) call void @hoo(ptr %a.i) call void @hoo(ptr %c) %tmp1 = load volatile i64, ptr %a.i, align 8 %call.i = call i32 (ptr, ...) 
@printf(ptr @.str, i64 %tmp1) - call void @llvm.lifetime.end.p0(i64 8, ptr %tmp) + call void @llvm.lifetime.end.p0(ptr %tmp) %inc = add nsw i32 %i.0, 1 br label %for.cond diff --git a/llvm/test/Analysis/MemorySSA/lifetime-simple.ll b/llvm/test/Analysis/MemorySSA/lifetime-simple.ll index 18d2459..03b6768 100644 --- a/llvm/test/Analysis/MemorySSA/lifetime-simple.ll +++ b/llvm/test/Analysis/MemorySSA/lifetime-simple.ll @@ -9,8 +9,8 @@ entry: %P = alloca [32 x i8] %Q = call ptr @obscure(ptr %P) ; CHECK: 1 = MemoryDef(liveOnEntry) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr %P) - call void @llvm.lifetime.start.p0(i64 32, ptr %P) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %P) + call void @llvm.lifetime.start.p0(ptr %P) ; CHECK: MemoryUse(1) ; CHECK-NEXT: %0 = load i8, ptr %P %0 = load i8, ptr %P @@ -18,8 +18,8 @@ entry: ; CHECK-NEXT: store i8 1, ptr %P store i8 1, ptr %P ; CHECK: 3 = MemoryDef(2) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr %P) - call void @llvm.lifetime.end.p0(i64 32, ptr %P) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %P) + call void @llvm.lifetime.end.p0(ptr %P) ; CHECK: MemoryUse(3) ; CHECK-NEXT: %1 = load i8, ptr %P %1 = load i8, ptr %P @@ -28,5 +28,5 @@ entry: %2 = load i8, ptr %Q ret i8 %1 } -declare void @llvm.lifetime.start.p0(i64 %S, ptr nocapture %P) readonly -declare void @llvm.lifetime.end.p0(i64 %S, ptr nocapture %P) +declare void @llvm.lifetime.start.p0(ptr nocapture %P) readonly +declare void @llvm.lifetime.end.p0(ptr nocapture %P) diff --git a/llvm/test/Analysis/MemorySSA/phi-translation.ll b/llvm/test/Analysis/MemorySSA/phi-translation.ll index b824481..22bbead 100644 --- a/llvm/test/Analysis/MemorySSA/phi-translation.ll +++ b/llvm/test/Analysis/MemorySSA/phi-translation.ll @@ -465,7 +465,7 @@ end: ; preds = %for.body define void @use_clobbered_by_def_in_loop() { entry: %nodeStack = alloca [12 x i32], align 4 - call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %nodeStack) + call void @llvm.lifetime.start.p0(ptr nonnull %nodeStack) br i1 false, label %cleanup, label %while.cond ; CHECK-LABEL: while.cond: @@ -502,12 +502,12 @@ while.end: ; preds = %while.cond, %land.r br i1 true, label %cleanup, label %while.cond.backedge cleanup: ; preds = %while.body, %while.end, %entry - call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %nodeStack) + call void @llvm.lifetime.end.p0(ptr nonnull %nodeStack) ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define void @another_loop_clobber_inc() { ; CHECK-LABEL: void @another_loop_clobber_inc diff --git a/llvm/test/Analysis/MemorySSA/pr43044.ll b/llvm/test/Analysis/MemorySSA/pr43044.ll index bd767d3..7ae02f3 100644 --- a/llvm/test/Analysis/MemorySSA/pr43044.ll +++ b/llvm/test/Analysis/MemorySSA/pr43044.ll @@ -4,7 +4,7 @@ target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64" target triple = "s390x-ibm-linux" -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) ; CHECK-LABEL: @func_42() define void @func_42() { diff --git a/llvm/test/Analysis/MemorySSA/pr49859.ll b/llvm/test/Analysis/MemorySSA/pr49859.ll index 25ef586..0e97f57 100644 --- a/llvm/test/Analysis/MemorySSA/pr49859.ll +++ b/llvm/test/Analysis/MemorySSA/pr49859.ll @@ -11,12 +11,12 @@ entry: %n = alloca i8, align 1 %i = alloca i8, align 1 
%cleanup.dest.slot = alloca i32, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr %sum) #3 + call void @llvm.lifetime.start.p0(ptr %sum) #3 store i8 0, ptr %sum, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr %n) #3 + call void @llvm.lifetime.start.p0(ptr %n) #3 %call = call i8 @idi(i8 10) store i8 %call, ptr %n, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr %i) #3 + call void @llvm.lifetime.start.p0(ptr %i) #3 store i8 0, ptr %i, align 1 br label %for.cond @@ -61,9 +61,9 @@ for.inc: ; preds = %if.end ; CHECK: final.cleanup: ; CHECK-NEXT: ; [[NO20:.*]] = MemoryPhi({if.then,[[NO9:.*]]},{for.cond.cleanup,[[NO8:.*]]}) ; CHECK-NEXT: ; [[NO12:.*]] = MemoryDef([[NO20]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr %i) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %i) final.cleanup: ; preds = %if.then, %for.cond.cleanup - call void @llvm.lifetime.end.p0(i64 1, ptr %i) #3 + call void @llvm.lifetime.end.p0(ptr %i) #3 br label %for.end ; CHECK: for.end: @@ -71,23 +71,23 @@ final.cleanup: ; preds = %if.then, %for ; CHECK-NEXT: %3 = load i8, ptr %sum, align 1 for.end: ; preds = %final.cleanup %8 = load i8, ptr %sum, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr %res.addr.i) + call void @llvm.lifetime.start.p0(ptr %res.addr.i) store i8 %8, ptr %res.addr.i, align 1 %9 = load i8, ptr %res.addr.i, align 1 call void @foo(i8 %9) #3 - call void @llvm.lifetime.end.p0(i64 1, ptr %res.addr.i) - call void @llvm.lifetime.end.p0(i64 1, ptr %n) #3 - call void @llvm.lifetime.end.p0(i64 1, ptr %sum) #3 + call void @llvm.lifetime.end.p0(ptr %res.addr.i) + call void @llvm.lifetime.end.p0(ptr %n) #3 + call void @llvm.lifetime.end.p0(ptr %sum) #3 ret void } ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare i8 @idi(i8) ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 ; Function Attrs: nounwind declare void @foo(i8) diff --git a/llvm/test/Analysis/MemorySSA/renamephis.ll b/llvm/test/Analysis/MemorySSA/renamephis.ll index e297b99..a731ef1 100644 --- a/llvm/test/Analysis/MemorySSA/renamephis.ll +++ b/llvm/test/Analysis/MemorySSA/renamephis.ll @@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gnu" declare void @g() ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0 +declare void @llvm.lifetime.end.p0(ptr nocapture) #0 ; CHECK-LABEL: @f define void @f(i1 %arg) align 2 { diff --git a/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll b/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll index 39b475d..7120eec 100644 --- a/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll +++ b/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll @@ -50,7 +50,7 @@ define i32 @d(i32 %base) { ; entry: %e = alloca [1 x [1 x i8]], align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr %e) #2 + call void @llvm.lifetime.start.p0(ptr %e) #2 br label %for.cond for.cond: ; preds = %for.cond, %entry @@ -69,4 +69,4 @@ for.cond: ; preds = %for.cond, %entry br label %for.cond } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) diff --git a/llvm/test/Analysis/ScalarEvolution/sdiv.ll b/llvm/test/Analysis/ScalarEvolution/sdiv.ll 
index 9eaaf8b..acc6ab0 100644 --- a/llvm/test/Analysis/ScalarEvolution/sdiv.ll +++ b/llvm/test/Analysis/ScalarEvolution/sdiv.ll @@ -38,7 +38,7 @@ define dso_local void @_Z4loopi(i32 %width) local_unnamed_addr #0 { entry: %storage = alloca [2 x i32], align 4 %0 = bitcast ptr %storage to ptr - call void @llvm.lifetime.start.p0(i64 8, ptr %storage) #4 + call void @llvm.lifetime.start.p0(ptr %storage) #4 call void @llvm.memset.p0.i64(ptr align 4 %0, i8 0, i64 8, i1 false) br label %for.cond @@ -48,7 +48,7 @@ for.cond: br i1 %cmp, label %for.body, label %for.cond.cleanup for.cond.cleanup: - call void @llvm.lifetime.end.p0(i64 8, ptr %storage) #4 + call void @llvm.lifetime.end.p0(ptr %storage) #4 ret void for.body: @@ -64,10 +64,10 @@ for.body: br label %for.cond } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2 declare dso_local i32 @_Z3adji(i32) local_unnamed_addr #3 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 diff --git a/llvm/test/Analysis/ScalarEvolution/srem.ll b/llvm/test/Analysis/ScalarEvolution/srem.ll index 377e58a..9d4538f 100644 --- a/llvm/test/Analysis/ScalarEvolution/srem.ll +++ b/llvm/test/Analysis/ScalarEvolution/srem.ll @@ -38,7 +38,7 @@ define dso_local void @_Z4loopi(i32 %width) local_unnamed_addr #0 { entry: %storage = alloca [2 x i32], align 4 %0 = bitcast ptr %storage to ptr - call void @llvm.lifetime.start.p0(i64 8, ptr %storage) #4 + call void @llvm.lifetime.start.p0(ptr %storage) #4 call void @llvm.memset.p0.i64(ptr align 4 %0, i8 0, i64 8, i1 false) br label %for.cond @@ -48,7 +48,7 @@ for.cond: br i1 %cmp, label %for.body, label %for.cond.cleanup for.cond.cleanup: - call void @llvm.lifetime.end.p0(i64 8, ptr %storage) #4 + call void @llvm.lifetime.end.p0(ptr %storage) #4 ret void for.body: @@ -64,10 +64,10 @@ for.body: br label %for.cond } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2 declare dso_local i32 @_Z3adji(i32) local_unnamed_addr #3 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 diff --git a/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll b/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll index 840a517..36d79f9 100644 --- a/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll +++ b/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll @@ -8,10 +8,10 @@ define i8 @test(i8 %input) { %dst = alloca i8 %src = alloca i8 ; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %src, i64 1, i1 false), !alias.scope ![[SCOPE:[0-9]+]] - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %src), !noalias !4 + call void @llvm.lifetime.start.p0(ptr nonnull %src), !noalias !4 store i8 %input, ptr %src call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 1, i1 false), !alias.scope !0 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %src), !noalias !4 + call void @llvm.lifetime.end.p0(ptr nonnull %src), !noalias !4 call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 1, i1 false), !alias.scope !4 %ret_value = load i8, ptr %dst call void @use(ptr %src) @@ -23,8 +23,8 @@ define i8 @test(i8 %input) { ; CHECK-DAG: ![[CALLEE0_B:[0-9]+]] = 
distinct !{!{{[0-9]+}}, !{{[0-9]+}}, !"callee0: %b"} ; CHECK-DAG: ![[SCOPE]] = !{![[CALLEE0_A]], ![[CALLEE0_B]]} -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1) !0 = !{!1, !7} diff --git a/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll b/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll index 6c3dec9..51bfa15 100644 --- a/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll +++ b/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll @@ -11,31 +11,31 @@ entry: ; CHECK: %y = alloca i32, align 4 ; CHECK-NEXT: Alive: <> %z = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %z) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z) + call void @llvm.lifetime.start.p0(ptr %z) +; CHECK: call void @llvm.lifetime.start.p0(ptr %z) ; CHECK-NEXT: Alive: <z> - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x z> call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <z> - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <y z> call void @capture32(ptr %y) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) +; CHECK: call void @llvm.lifetime.end.p0(ptr %y) ; CHECK-NEXT: Alive: <z> call void @capture32(ptr %z) - call void @llvm.lifetime.end.p0(i64 -1, ptr %z) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z) + call void @llvm.lifetime.end.p0(ptr %z) +; CHECK: call void @llvm.lifetime.end.p0(ptr %z) ; CHECK-NEXT: Alive: <> ret void @@ -48,13 +48,13 @@ entry: ; CHECK-NEXT: Alive: <y> %x = alloca i32, align 4 %y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x y> call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <y> call void @capture32(ptr %y) @@ -69,31 +69,31 @@ entry: %x = alloca i32, align 4 %y = alloca i32, align 4 %z = alloca i64, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <x y> call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) -; CHECK: call void 
@llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <y> call void @capture32(ptr %y) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) +; CHECK: call void @llvm.lifetime.end.p0(ptr %y) ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 -1, ptr %z) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z) + call void @llvm.lifetime.start.p0(ptr %z) +; CHECK: call void @llvm.lifetime.start.p0(ptr %z) ; CHECK-NEXT: Alive: <z> call void @capture64(ptr %z) - call void @llvm.lifetime.end.p0(i64 -1, ptr %z) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z) + call void @llvm.lifetime.end.p0(ptr %z) +; CHECK: call void @llvm.lifetime.end.p0(ptr %z) ; CHECK-NEXT: Alive: <> ret void @@ -111,31 +111,31 @@ entry: ; CHECK-NEXT: Alive: <> %z = alloca i64, align 4 %y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <x y> - call void @llvm.lifetime.start.p0(i64 -1, ptr %z) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z) + call void @llvm.lifetime.start.p0(ptr %z) +; CHECK: call void @llvm.lifetime.start.p0(ptr %z) ; CHECK-NEXT: Alive: <x y z> call void @capture32(ptr %x) call void @capture32(ptr %y) call void @capture64(ptr %z) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <y z> - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) +; CHECK: call void @llvm.lifetime.end.p0(ptr %y) ; CHECK-NEXT: Alive: <z> - call void @llvm.lifetime.end.p0(i64 -1, ptr %z) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z) + call void @llvm.lifetime.end.p0(ptr %z) +; CHECK: call void @llvm.lifetime.end.p0(ptr %z) ; CHECK-NEXT: Alive: <> ret void @@ -154,12 +154,12 @@ entry: %z = alloca i64, align 8 %z1 = alloca i64, align 8 %z2 = alloca i64, align 8 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x1) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x1) + call void @llvm.lifetime.start.p0(ptr %x1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x1) ; CHECK-NEXT: Alive: <x1> - call void @llvm.lifetime.start.p0(i64 -1, ptr %x2) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x2) + call void @llvm.lifetime.start.p0(ptr %x2) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x2) ; CHECK-NEXT: Alive: <x1 x2> call void @capture64(ptr nonnull %x1) @@ -171,8 +171,8 @@ entry: if.then: ; preds = %entry ; CHECK: if.then: ; CHECK-NEXT: Alive: <x1 x2> - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <x1 x2 y> call void @capture64(ptr nonnull %y) @@ -181,13 +181,13 @@ if.then: ; preds = %entry if.then3: ; preds = 
%if.then ; CHECK: if.then3: ; CHECK-NEXT: Alive: <x1 x2 y> - call void @llvm.lifetime.start.p0(i64 -1, ptr %y1) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y1) + call void @llvm.lifetime.start.p0(ptr %y1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y1) ; CHECK-NEXT: Alive: <x1 x2 y y1> call void @capture64(ptr nonnull %y1) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y1) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y1) + call void @llvm.lifetime.end.p0(ptr %y1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %y1) ; CHECK-NEXT: Alive: <x1 x2 y> br label %if.end @@ -195,13 +195,13 @@ if.then3: ; preds = %if.then if.else: ; preds = %if.then ; CHECK: if.else: ; CHECK-NEXT: Alive: <x1 x2 y> - call void @llvm.lifetime.start.p0(i64 -1, ptr %y2) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y2) + call void @llvm.lifetime.start.p0(ptr %y2) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y2) ; CHECK-NEXT: Alive: <x1 x2 y y2> call void @capture64(ptr nonnull %y2) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y2) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y2) + call void @llvm.lifetime.end.p0(ptr %y2) +; CHECK: call void @llvm.lifetime.end.p0(ptr %y2) ; CHECK-NEXT: Alive: <x1 x2 y> br label %if.end @@ -209,8 +209,8 @@ if.else: ; preds = %if.then if.end: ; preds = %if.else, %if.then3 ; CHECK: if.end: ; CHECK-NEXT: Alive: <x1 x2 y> - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) +; CHECK: call void @llvm.lifetime.end.p0(ptr %y) ; CHECK-NEXT: Alive: <x1 x2> br label %if.end9 @@ -222,8 +222,8 @@ if.else4: ; preds = %entry ; CHECK: %z.cast = bitcast ptr %z to ptr ; CHECK-NEXT: Alive: <x1 x2> - call void @llvm.lifetime.start.p0(i64 -1, ptr %z) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z) + call void @llvm.lifetime.start.p0(ptr %z) +; CHECK: call void @llvm.lifetime.start.p0(ptr %z) ; CHECK-NEXT: Alive: <x1 x2 z> call void @capture64(ptr nonnull %z) @@ -232,13 +232,13 @@ if.else4: ; preds = %entry if.then6: ; preds = %if.else4 ; CHECK: if.then6: ; CHECK-NEXT: Alive: <x1 x2 z> - call void @llvm.lifetime.start.p0(i64 -1, ptr %z1) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z1) + call void @llvm.lifetime.start.p0(ptr %z1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %z1) ; CHECK-NEXT: Alive: <x1 x2 z z1> call void @capture64(ptr nonnull %z1) - call void @llvm.lifetime.end.p0(i64 -1, ptr %z1) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z1) + call void @llvm.lifetime.end.p0(ptr %z1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %z1) ; CHECK-NEXT: Alive: <x1 x2 z> br label %if.end8 @@ -246,13 +246,13 @@ if.then6: ; preds = %if.else4 if.else7: ; preds = %if.else4 ; CHECK: if.else7: ; CHECK-NEXT: Alive: <x1 x2 z> - call void @llvm.lifetime.start.p0(i64 -1, ptr %z2) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z2) + call void @llvm.lifetime.start.p0(ptr %z2) +; CHECK: call void @llvm.lifetime.start.p0(ptr %z2) ; CHECK-NEXT: Alive: <x1 x2 z z2> call void @capture64(ptr nonnull %z2) - call void @llvm.lifetime.end.p0(i64 -1, ptr %z2) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z2) + call void @llvm.lifetime.end.p0(ptr %z2) +; CHECK: call void @llvm.lifetime.end.p0(ptr %z2) ; CHECK-NEXT: Alive: <x1 x2 z> br label %if.end8 @@ -260,8 +260,8 @@ if.else7: ; preds = %if.else4 if.end8: ; preds = %if.else7, %if.then6 ; CHECK: if.end8: ; CHECK-NEXT: Alive: <x1 x2 z> - call void @llvm.lifetime.end.p0(i64 -1, ptr 
%z) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z) + call void @llvm.lifetime.end.p0(ptr %z) +; CHECK: call void @llvm.lifetime.end.p0(ptr %z) ; CHECK-NEXT: Alive: <x1 x2> br label %if.end9 @@ -269,12 +269,12 @@ if.end8: ; preds = %if.else7, %if.then6 if.end9: ; preds = %if.end8, %if.end ; CHECK: if.end9: ; CHECK-NEXT: Alive: <x1 x2> - call void @llvm.lifetime.end.p0(i64 -1, ptr %x2) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x2) + call void @llvm.lifetime.end.p0(ptr %x2) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x2) ; CHECK-NEXT: Alive: <x1> - call void @llvm.lifetime.end.p0(i64 -1, ptr %x1) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x1) + call void @llvm.lifetime.end.p0(ptr %x1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x1) ; CHECK-NEXT: Alive: <> ret void @@ -287,8 +287,8 @@ entry: ; CHECK-NEXT: Alive: <> %x = alloca i32, align 4 %y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> call void @capture32(ptr %x) @@ -297,17 +297,17 @@ entry: bb2: ; preds = %entry ; CHECK: bb2: ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <x y> call void @capture32(ptr %y) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) +; CHECK: call void @llvm.lifetime.end.p0(ptr %y) ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <> ret void @@ -315,8 +315,8 @@ bb2: ; preds = %entry bb3: ; preds = %entry ; CHECK: bb3: ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <> ret void @@ -329,13 +329,13 @@ entry: ; CHECK-NEXT: Alive: <> %x = alloca i32, align 4 %y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <> br i1 %d, label %bb2, label %bb3 @@ -343,13 +343,13 @@ entry: bb2: ; preds = %entry ; CHECK: bb2: ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <y> call void @capture32(ptr %y) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) +; CHECK: call void @llvm.lifetime.end.p0(ptr %y) ; CHECK-NEXT: Alive: <> ret void @@ -367,13 +367,13 @@ 
entry: ; CHECK-NEXT: Alive: <> %x = alloca i32, align 4 %y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <> br i1 %d, label %bb2, label %bb3 @@ -381,8 +381,8 @@ entry: bb2: ; preds = %entry ; CHECK: bb2: ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <y> call void @capture32(ptr %y) @@ -401,8 +401,8 @@ entry: ; CHECK-NEXT: Alive: <> %x = alloca i32, align 4 %y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> call void @capture32(ptr %x) @@ -411,12 +411,12 @@ entry: bb2: ; preds = %entry ; CHECK: bb2: ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <y> call void @capture32(ptr %y) @@ -436,8 +436,8 @@ entry: %x = alloca i32, align 4 %y = alloca i32, align 4 call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <x> br i1 %d, label %bb2, label %bb3 @@ -445,8 +445,8 @@ entry: bb2: ; preds = %entry ; CHECK: bb2: ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <x y> call void @capture32(ptr %y) @@ -467,12 +467,12 @@ entry: %B.i2 = alloca [100 x i32], align 4 %A.i = alloca [100 x i32], align 4 %B.i = alloca [100 x i32], align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i) + call void @llvm.lifetime.start.p0(ptr %A.i) +; CHECK: call void @llvm.lifetime.start.p0(ptr %A.i) ; CHECK-NEXT: Alive: <A.i> - call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i) + call void @llvm.lifetime.start.p0(ptr %B.i) +; CHECK: call void @llvm.lifetime.start.p0(ptr %B.i) ; CHECK-NEXT: Alive: <A.i B.i> call void @capture100x32(ptr %A.i) @@ -480,30 +480,30 @@ entry: ; CHECK-NEXT: Alive: <A.i B.i> call void @capture100x32(ptr %B.i) - call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i) + call void @llvm.lifetime.end.p0(ptr %A.i) +; CHECK: call void 
@llvm.lifetime.end.p0(ptr %A.i) ; CHECK-NEXT: Alive: <B.i> - call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i) + call void @llvm.lifetime.end.p0(ptr %B.i) +; CHECK: call void @llvm.lifetime.end.p0(ptr %B.i) ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i1) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i1) + call void @llvm.lifetime.start.p0(ptr %A.i1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %A.i1) ; CHECK-NEXT: Alive: <A.i1> - call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i2) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i2) + call void @llvm.lifetime.start.p0(ptr %B.i2) +; CHECK: call void @llvm.lifetime.start.p0(ptr %B.i2) ; CHECK-NEXT: Alive: <A.i1 B.i2> call void @capture100x32(ptr %A.i1) call void @capture100x32(ptr %B.i2) - call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i1) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i1) + call void @llvm.lifetime.end.p0(ptr %A.i1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %A.i1) ; CHECK-NEXT: Alive: <B.i2> - call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i2) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i2) + call void @llvm.lifetime.end.p0(ptr %B.i2) +; CHECK: call void @llvm.lifetime.end.p0(ptr %B.i2) ; CHECK-NEXT: Alive: <> ret void @@ -516,20 +516,20 @@ entry: ; CHECK-NEXT: Alive: <> %buf1 = alloca i8, i32 100000, align 16 %buf2 = alloca i8, i32 100000, align 16 - call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1) + call void @llvm.lifetime.start.p0(ptr %buf1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %buf1) ; CHECK-NEXT: Alive: <buf1> - call void @llvm.lifetime.end.p0(i64 -1, ptr %buf1) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %buf1) + call void @llvm.lifetime.end.p0(ptr %buf1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %buf1) ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1) + call void @llvm.lifetime.start.p0(ptr %buf1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %buf1) ; CHECK-NEXT: Alive: <buf1> - call void @llvm.lifetime.start.p0(i64 -1, ptr %buf2) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %buf2) + call void @llvm.lifetime.start.p0(ptr %buf2) +; CHECK: call void @llvm.lifetime.start.p0(ptr %buf2) ; CHECK-NEXT: Alive: <buf1 buf2> call void @capture8(ptr %buf1) @@ -546,22 +546,22 @@ entry: %B.i2 = alloca [100 x i32], align 4 %A.i = alloca [100 x i32], align 4 %B.i = alloca [100 x i32], align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i) + call void @llvm.lifetime.start.p0(ptr %A.i) +; CHECK: call void @llvm.lifetime.start.p0(ptr %A.i) ; CHECK-NEXT: Alive: <A.i A.i1 B.i2> - call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i) + call void @llvm.lifetime.start.p0(ptr %B.i) +; CHECK: call void @llvm.lifetime.start.p0(ptr %B.i) ; CHECK-NEXT: Alive: <A.i A.i1 B.i B.i2> call void @capture100x32(ptr %A.i) call void @capture100x32(ptr %B.i) - call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i) + call void @llvm.lifetime.end.p0(ptr %A.i) +; CHECK: call void @llvm.lifetime.end.p0(ptr %A.i) ; CHECK-NEXT: Alive: <A.i1 B.i B.i2> - call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i) -; CHECK: call void 
@llvm.lifetime.end.p0(i64 -1, ptr %B.i) + call void @llvm.lifetime.end.p0(ptr %B.i) +; CHECK: call void @llvm.lifetime.end.p0(ptr %B.i) ; CHECK-NEXT: Alive: <A.i1 B.i2> br label %block2 @@ -583,23 +583,23 @@ entry: ; CHECK-NEXT: Alive: <> %a.i = alloca [4 x %struct.Klass], align 16 %b.i = alloca [4 x %struct.Klass], align 16 - call void @llvm.lifetime.start.p0(i64 -1, ptr %a.i) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %a.i) + call void @llvm.lifetime.start.p0(ptr %a.i) +; CHECK: call void @llvm.lifetime.start.p0(ptr %a.i) ; CHECK-NEXT: Alive: <a.i> - call void @llvm.lifetime.start.p0(i64 -1, ptr %b.i) -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %b.i) + call void @llvm.lifetime.start.p0(ptr %b.i) +; CHECK: call void @llvm.lifetime.start.p0(ptr %b.i) ; CHECK-NEXT: Alive: <a.i b.i> call void @capture8(ptr %a.i) call void @capture8(ptr %b.i) %z3 = load i32, ptr %a.i, align 16 - call void @llvm.lifetime.end.p0(i64 -1, ptr %a.i) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %a.i) + call void @llvm.lifetime.end.p0(ptr %a.i) +; CHECK: call void @llvm.lifetime.end.p0(ptr %a.i) ; CHECK-NEXT: Alive: <b.i> - call void @llvm.lifetime.end.p0(i64 -1, ptr %b.i) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %b.i) + call void @llvm.lifetime.end.p0(ptr %b.i) +; CHECK: call void @llvm.lifetime.end.p0(ptr %b.i) ; CHECK-NEXT: Alive: <> ret i32 %z3 @@ -611,8 +611,8 @@ entry: ; CHECK: entry: ; CHECK-NEXT: Alive: <> %x = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> br label %l2 @@ -622,8 +622,8 @@ l2: ; preds = %l2, %entry ; MAY-NEXT: Alive: <x> ; MUST-NEXT: Alive: <> call void @capture8(ptr %x) - call void @llvm.lifetime.end.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <> br label %l2 @@ -636,8 +636,8 @@ entry: ; CHECK-NEXT: Alive: <> %x = alloca i8, align 4 %y = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> br label %l2 @@ -645,17 +645,17 @@ entry: l2: ; preds = %l2, %entry ; CHECK: l2: ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.start.p0(i64 1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <x y> call void @capture8(ptr %y) - call void @llvm.lifetime.end.p0(i64 1, ptr %y) -; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) +; CHECK: call void @llvm.lifetime.end.p0(ptr %y) ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.start.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> call void @capture8(ptr %x) @@ -677,24 +677,24 @@ entry: if.then: ; preds = %entry ; CHECK: if.then: ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a) -; CHECK: call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a) + call void @llvm.lifetime.start.p0(ptr nonnull %a) +; CHECK: call 
void @llvm.lifetime.start.p0(ptr nonnull %a) ; CHECK-NEXT: Alive: <a> tail call void @capture8(ptr %a) - call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a) -; CHECK: call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a) + call void @llvm.lifetime.end.p0(ptr nonnull %a) +; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %a) ; CHECK-NEXT: Alive: <> br label %if.end if.else: ; preds = %entry ; CHECK: if.else: ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %b) -; CHECK: call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %b) + call void @llvm.lifetime.start.p0(ptr nonnull %b) +; CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %b) ; CHECK-NEXT: Alive: <b> tail call void @capture8(ptr %b) - call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b) -; CHECK: call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b) + call void @llvm.lifetime.end.p0(ptr nonnull %b) +; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %b) ; CHECK-NEXT: Alive: <> br label %if.end @@ -719,8 +719,8 @@ entry: if.then: ; CHECK: if.then: ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <y> br label %if.end @@ -730,12 +730,12 @@ if.then: if.else: ; CHECK: if.else: ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <y> - call void @llvm.lifetime.start.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x y> br label %if.end @@ -758,12 +758,12 @@ entry: %x = alloca i8, align 4 %y = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <y> - call void @llvm.lifetime.start.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x y> br label %end @@ -773,7 +773,7 @@ entry: dead: ; CHECK: dead: ; CHECK-NOT: Alive: - call void @llvm.lifetime.start.p0(i64 4, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) br label %end ; CHECK: br label %end @@ -792,20 +792,20 @@ entry: ; CHECK: entry: ; CHECK-NEXT: Alive: <> %x = alloca i8 - call void @llvm.lifetime.start.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.start.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.end.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.end.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x) + call void 
@llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <> ret void @@ -827,8 +827,8 @@ if.then: ; CHECK: if.then: ; MAY-NEXT: Alive: <x y> ; MUST-NEXT: Alive: <> - call void @llvm.lifetime.end.p0(i64 1, ptr %y) -; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) +; CHECK: call void @llvm.lifetime.end.p0(ptr %y) ; MAY-NEXT: Alive: <x> ; MUST-NEXT: Alive: <> @@ -840,12 +840,12 @@ if.then: if.else: ; CHECK: if.else: ; CHECK-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <y> - call void @llvm.lifetime.start.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x y> br label %if.then @@ -868,8 +868,8 @@ entry: %x = alloca i8, align 4 %y = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> br i1 %a, label %if.then, label %if.else @@ -880,8 +880,8 @@ if.then: ; CHECK: if.then: ; MAY-NEXT: Alive: <x> ; MUST-NEXT: Alive: <> - call void @llvm.lifetime.start.p0(i64 1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; MAY-NEXT: Alive: <x y> ; MUST-NEXT: Alive: <y> @@ -893,12 +893,12 @@ if.then: if.else: ; CHECK: if.else: ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.end.p0(i64 1, ptr %y) -; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) +; CHECK: call void @llvm.lifetime.end.p0(ptr %y) ; CHECK-NEXT: Alive: <x> - call void @llvm.lifetime.end.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) +; CHECK: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: Alive: <> br label %if.then @@ -921,8 +921,8 @@ entry: %x = alloca i8, align 4 %y = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr %x) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) +; CHECK: call void @llvm.lifetime.start.p0(ptr %x) ; CHECK-NEXT: Alive: <x> br i1 %a, label %if.then, label %if.end @@ -933,8 +933,8 @@ if.then: ; CHECK: if.then: ; MAY-NEXT: Alive: <x y> ; MUST-NEXT: Alive: <x> - call void @llvm.lifetime.start.p0(i64 1, ptr %y) -; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) +; CHECK: call void @llvm.lifetime.start.p0(ptr %y) ; CHECK-NEXT: Alive: <x y> br i1 %a, label %if.then, label %if.end @@ -949,8 +949,8 @@ if.end: ret void } -declare void @llvm.lifetime.start.p0(i64, ptr captures(none)) -declare void @llvm.lifetime.end.p0(i64, ptr captures(none)) +declare void @llvm.lifetime.start.p0(ptr captures(none)) +declare void @llvm.lifetime.end.p0(ptr captures(none)) declare void @capture8(ptr) declare void @capture32(ptr) declare void @capture64(ptr) diff --git a/llvm/test/Analysis/StackSafetyAnalysis/local.ll b/llvm/test/Analysis/StackSafetyAnalysis/local.ll index 02d46c8..6944f38 100644 --- a/llvm/test/Analysis/StackSafetyAnalysis/local.ll +++ 
b/llvm/test/Analysis/StackSafetyAnalysis/local.ll @@ -707,9 +707,9 @@ entry: %n = load i8, ptr %y call void @llvm.memset.p0.i32(ptr nonnull %z, i8 0, i32 1, i1 false) - call void @llvm.lifetime.start.p0(i64 1, ptr %x) - call void @llvm.lifetime.start.p0(i64 1, ptr %y) - call void @llvm.lifetime.start.p0(i64 1, ptr %z) + call void @llvm.lifetime.start.p0(ptr %x) + call void @llvm.lifetime.start.p0(ptr %y) + call void @llvm.lifetime.start.p0(ptr %z) ret void } @@ -731,9 +731,9 @@ entry: %y = alloca i8, align 4 %z = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr %x) - call void @llvm.lifetime.start.p0(i64 1, ptr %y) - call void @llvm.lifetime.start.p0(i64 1, ptr %z) + call void @llvm.lifetime.start.p0(ptr %x) + call void @llvm.lifetime.start.p0(ptr %y) + call void @llvm.lifetime.start.p0(ptr %z) store i8 5, ptr %x %n = load i8, ptr %y @@ -756,13 +756,13 @@ entry: %y = alloca i8, align 4 %z = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr %x) - call void @llvm.lifetime.start.p0(i64 1, ptr %y) - call void @llvm.lifetime.start.p0(i64 1, ptr %z) + call void @llvm.lifetime.start.p0(ptr %x) + call void @llvm.lifetime.start.p0(ptr %y) + call void @llvm.lifetime.start.p0(ptr %z) - call void @llvm.lifetime.end.p0(i64 1, ptr %x) - call void @llvm.lifetime.end.p0(i64 1, ptr %y) - call void @llvm.lifetime.end.p0(i64 1, ptr %z) + call void @llvm.lifetime.end.p0(ptr %x) + call void @llvm.lifetime.end.p0(ptr %y) + call void @llvm.lifetime.end.p0(ptr %z) store i8 5, ptr %x %n = load i8, ptr %y @@ -973,13 +973,13 @@ define void @DoubleLifetime() { ; CHECK-EMPTY: entry: %a = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 true) - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret void } @@ -993,13 +993,13 @@ define void @DoubleLifetime2() { ; CHECK-EMPTY: entry: %a = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) %n = load i32, ptr %a - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret void } @@ -1013,13 +1013,13 @@ define void @DoubleLifetime3() { ; CHECK-EMPTY: entry: %a = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) store i32 5, ptr %a - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret void } @@ -1033,9 +1033,9 @@ define void @DoubleLifetime4() { ; CHECK-EMPTY: entry: %a = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false) - call void 
@llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) call void @unknown_call(ptr %a) ret void } @@ -1136,5 +1136,5 @@ entry: ret ptr null } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/Analysis/ValueTracking/pr152700.ll b/llvm/test/Analysis/ValueTracking/pr152700.ll new file mode 100644 index 0000000..91644c5 --- /dev/null +++ b/llvm/test/Analysis/ValueTracking/pr152700.ll @@ -0,0 +1,28 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=instcombine -S | FileCheck %s + +declare noundef i32 @llvm.nvvm.read.ptx.sreg.nctaid.x() +declare i32 @llvm.umin.i32(i32, i32) +define i32 @foo(i1 %c, i32 %arg) { +; CHECK-LABEL: define i32 @foo( +; CHECK-SAME: i1 [[C:%.*]], i32 [[ARG:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[I:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.nctaid.x() +; CHECK-NEXT: br i1 [[C]], label %[[BB_1:.*]], label %[[BB_2:.*]] +; CHECK: [[BB_1]]: +; CHECK-NEXT: br label %[[BB_2]] +; CHECK: [[BB_2]]: +; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[I]], %[[ENTRY]] ], [ 0, %[[BB_1]] ] +; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.umin.i32(i32 [[PHI]], i32 [[ARG]]) +; CHECK-NEXT: ret i32 [[RES]] +; +entry: + %i = call i32 @llvm.nvvm.read.ptx.sreg.nctaid.x() + br i1 %c, label %bb.1, label %bb.2 +bb.1: + br label %bb.2 +bb.2: + %phi = phi i32 [ %i, %entry ], [ 0, %bb.1 ] + %res = call i32 @llvm.umin.i32(i32 %phi, i32 %arg) + ret i32 %res +} diff --git a/llvm/test/Assembler/auto_upgrade_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_intrinsics.ll index d1b535b..37cb496 100644 --- a/llvm/test/Assembler/auto_upgrade_intrinsics.ll +++ b/llvm/test/Assembler/auto_upgrade_intrinsics.ll @@ -171,10 +171,10 @@ define void @tests.lifetime.start.end() { ; CHECK-LABEL: @tests.lifetime.start.end( %a = alloca i8 call void @llvm.lifetime.start(i64 1, ptr %a) - ; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %a) + ; CHECK: call void @llvm.lifetime.start.p0(ptr %a) store i8 0, ptr %a call void @llvm.lifetime.end(i64 1, ptr %a) - ; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %a) + ; CHECK: call void @llvm.lifetime.end.p0(ptr %a) ret void } @@ -185,10 +185,10 @@ define void @tests.lifetime.start.end.unnamed() { ; CHECK-LABEL: @tests.lifetime.start.end.unnamed( %a = alloca ptr call void @llvm.lifetime.start.unnamed(i64 1, ptr %a) - ; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %a) + ; CHECK: call void @llvm.lifetime.start.p0(ptr %a) store ptr null, ptr %a call void @llvm.lifetime.end.unnamed(i64 1, ptr %a) - ; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %a) + ; CHECK: call void @llvm.lifetime.end.p0(ptr %a) ret void } @@ -220,5 +220,5 @@ define void @test.prefetch.unnamed(ptr %ptr) { ; emitted at the end. 
; CHECK: declare i32 @llvm.objectsize.i32.p0 -; CHECK: declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) -; CHECK: declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none)) +; CHECK: declare void @llvm.lifetime.start.p0(ptr captures(none)) +; CHECK: declare void @llvm.lifetime.end.p0(ptr captures(none)) diff --git a/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll b/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll index 00ab934..377c002 100644 --- a/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll +++ b/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll @@ -5,8 +5,8 @@ define void @strip_bitcast() { ; CHECK-LABEL: define void @strip_bitcast() { ; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 ; CHECK-NEXT: [[B:%.*]] = bitcast ptr [[A]] to ptr -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret void ; %a = alloca i8 @@ -20,8 +20,8 @@ define void @strip_addrspacecast() { ; CHECK-LABEL: define void @strip_addrspacecast() { ; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 ; CHECK-NEXT: [[B:%.*]] = addrspacecast ptr [[A]] to ptr addrspace(1) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret void ; %a = alloca i8 @@ -35,8 +35,8 @@ define void @strip_gep() { ; CHECK-LABEL: define void @strip_gep() { ; CHECK-NEXT: [[A:%.*]] = alloca [2 x i8], align 1 ; CHECK-NEXT: [[B:%.*]] = getelementptr [2 x i8], ptr [[A]], i64 0, i64 0 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret void ; %a = alloca [2 x i8] @@ -55,3 +55,8 @@ define void @remove_unanalyzable(ptr %p) { call void @llvm.lifetime.end.p0(i64 1, ptr %p) ret void } + +declare void @llvm.lifetime.start.p0(i64, ptr) +declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.start.p1(i64, ptr addrspace(1)) +declare void @llvm.lifetime.end.p1(i64, ptr addrspace(1)) diff --git a/llvm/test/Assembler/ptrtoaddr-invalid-constexpr.ll b/llvm/test/Assembler/ptrtoaddr-invalid-constexpr.ll new file mode 100644 index 0000000..665deff --- /dev/null +++ b/llvm/test/Assembler/ptrtoaddr-invalid-constexpr.ll @@ -0,0 +1,56 @@ +;; Check all requirements on the ptrtoaddr constant expression operands +;; Most of these invalid cases are detected at parse time but some are only +;; detected at verification time (see Verifier::visitPtrToAddrInst()) +; RUN: rm -rf %t && split-file --leading-lines %s %t + +;--- src_vec_dst_no_vec.ll +; RUN: not llvm-as %t/src_vec_dst_no_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_VEC_DST_NO_VEC %s --implicit-check-not="error:" +@g = global i64 ptrtoaddr (<2 x ptr> <ptr @g, ptr @g> to i64) +; SRC_VEC_DST_NO_VEC: [[#@LINE-1]]:17: error: invalid cast opcode for cast from '<2 x ptr>' to 'i64' + +;--- src_no_vec_dst_vec.ll +; RUN: not llvm-as %t/src_no_vec_dst_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NO_VEC_DST_VEC %s --implicit-check-not="error:" +@g = global <2 x i64> ptrtoaddr (ptr @g to <2 x i64>) +; SRC_NO_VEC_DST_VEC: 
[[#@LINE-1]]:23: error: invalid cast opcode for cast from 'ptr' to '<2 x i64>' + +;--- dst_not_int.ll +; RUN: not llvm-as %t/dst_not_int.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_INT %s --implicit-check-not="error:" +@g = global float ptrtoaddr (ptr @g to float) +; DST_NOT_INT: [[#@LINE-1]]:19: error: invalid cast opcode for cast from 'ptr' to 'float' + +;--- dst_not_int_vec.ll +; RUN: not llvm-as %t/dst_not_int_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_INT_VEC %s --implicit-check-not="error:" +@g = global <2 x float> ptrtoaddr (<2 x ptr> <ptr @g, ptr @g> to <2 x float>) +; DST_NOT_INT_VEC: [[#@LINE-1]]:25: error: invalid cast opcode for cast from '<2 x ptr>' to '<2 x float>' + +;--- src_not_ptr.ll +; RUN: not llvm-as %t/src_not_ptr.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NOT_PTR %s --implicit-check-not="error:" +@g = global i64 ptrtoaddr (i32 1 to i64) +; SRC_NOT_PTR: [[#@LINE-1]]:17: error: invalid cast opcode for cast from 'i32' to 'i64' + +;--- src_not_ptr_vec.ll +; RUN: not llvm-as %t/src_not_ptr_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NOT_PTR_VEC %s --implicit-check-not="error:" +@g = global <2 x i64> ptrtoaddr (<2 x i32> <i32 1, i32 2> to <2 x i64>) +; SRC_NOT_PTR_VEC: [[#@LINE-1]]:23: error: invalid cast opcode for cast from '<2 x i32>' to '<2 x i64>' + +;--- vec_src_fewer_elems.ll +; RUN: not llvm-as %t/vec_src_fewer_elems.ll -o /dev/null 2>&1 | FileCheck -check-prefix=VEC_SRC_FEWER_ELEMS %s --implicit-check-not="error:" +@g = global <4 x i64> ptrtoaddr (<2 x ptr> <ptr @g, ptr @g> to <4 x i64>) +; VEC_SRC_FEWER_ELEMS: [[#@LINE-1]]:23: error: invalid cast opcode for cast from '<2 x ptr>' to '<4 x i64>' + +;--- vec_dst_fewer_elems.ll +; RUN: not llvm-as %t/vec_dst_fewer_elems.ll -o /dev/null 2>&1 | FileCheck -check-prefix=VEC_DST_FEWER_ELEMS %s --implicit-check-not="error:" +@g = global <2 x i64> ptrtoaddr (<4 x ptr> <ptr @g, ptr @g, ptr @g, ptr @g> to <2 x i64>) +; VEC_DST_FEWER_ELEMS: [[#@LINE-1]]:23: error: invalid cast opcode for cast from '<4 x ptr>' to '<2 x i64>' + +;--- dst_not_addr_size.ll +; The following invalid IR is caught by the verifier, not the parser: +; RUN: llvm-as %t/dst_not_addr_size.ll --disable-output --disable-verify +; RUN: not llvm-as %t/dst_not_addr_size.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_ADDR_SIZE %s --implicit-check-not="error:" +; DST_NOT_ADDR_SIZE: assembly parsed, but does not verify as correct! 
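For context, a sketch of the address-width rule that the DST_NOT_ADDR_SIZE lines below exercise (illustrative IR, not part of the commit; the p1:64:64:64:32 layout is the one used by the ptrtoaddr.ll test later in this diff): ptrtoaddr must yield an integer exactly as wide as the address width of the pointer's address space, which can be narrower than the pointer's storage size.

target datalayout = "p1:64:64:64:32"

@x0 = global i32 0
@x1 = addrspace(1) global i32 0

; Valid: address space 0 uses the full 64-bit address width.
@a0 = global i64 ptrtoaddr (ptr @x0 to i64)
; Valid: AS1 pointers are stored as 64 bits but carry only 32 address bits.
@a1 = global i32 ptrtoaddr (ptr addrspace(1) @x1 to i32)
; Invalid, and what the checks below reject: i32 is narrower than the
; 64-bit address width of address space 0.
; @bad = global i32 ptrtoaddr (ptr @x0 to i32)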
+@g = global i32 ptrtoaddr (ptr @g to i32) +; DST_NOT_ADDR_SIZE-NEXT: PtrToAddr result must be address width +; DST_NOT_ADDR_SIZE-NEXT: i32 ptrtoaddr (ptr @g to i32) +@g_vec = global <4 x i32> ptrtoaddr (<4 x ptr> <ptr @g, ptr @g, ptr @g, ptr @g> to <4 x i32>) +; TODO: Verifier.cpp does not visit ConstantVector/ConstantStruct values +; TODO-DST_NOT_ADDR_SIZE: PtrToAddr result must be address width diff --git a/llvm/test/Assembler/ptrtoaddr-invalid.ll b/llvm/test/Assembler/ptrtoaddr-invalid.ll new file mode 100644 index 0000000..dff787b --- /dev/null +++ b/llvm/test/Assembler/ptrtoaddr-invalid.ll @@ -0,0 +1,84 @@ +;; Check all requirements on the ptrtoaddr instruction operands +;; Most of these invalid cases are detected at parse time but some are only +;; detected at verification time (see Verifier::visitPtrToAddrInst()) +; RUN: rm -rf %t && split-file --leading-lines %s %t + +;--- src_vec_dst_no_vec.ll +; RUN: not llvm-as %t/src_vec_dst_no_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_VEC_DST_NO_VEC %s --implicit-check-not="error:" +define i64 @bad(<2 x ptr> %p) { + %addr = ptrtoaddr <2 x ptr> %p to i64 + ; SRC_VEC_DST_NO_VEC: [[#@LINE-1]]:21: error: invalid cast opcode for cast from '<2 x ptr>' to 'i64' + ret i64 %addr +} + +;--- src_no_vec_dst_vec.ll +; RUN: not llvm-as %t/src_no_vec_dst_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NO_VEC_DST_VEC %s --implicit-check-not="error:" +define <2 x i64> @bad(ptr %p) { + %addr = ptrtoaddr ptr %p to <2 x i64> + ; SRC_NO_VEC_DST_VEC: [[#@LINE-1]]:21: error: invalid cast opcode for cast from 'ptr' to '<2 x i64>' + ret <2 x i64> %addr +} + +;--- dst_not_int.ll +; RUN: not llvm-as %t/dst_not_int.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_INT %s --implicit-check-not="error:" +define float @bad(ptr %p) { + %addr = ptrtoaddr ptr %p to float + ; DST_NOT_INT: [[#@LINE-1]]:21: error: invalid cast opcode for cast from 'ptr' to 'float' + ret float %addr +} + +;--- dst_not_int_vec.ll +; RUN: not llvm-as %t/dst_not_int_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_INT_VEC %s --implicit-check-not="error:" +define <2 x float> @bad(<2 x ptr> %p) { + %addr = ptrtoaddr <2 x ptr> %p to <2 x float> + ; DST_NOT_INT_VEC: [[#@LINE-1]]:21: error: invalid cast opcode for cast from '<2 x ptr>' to '<2 x float>' + ret <2 x float> %addr +} + +;--- src_not_ptr.ll +; RUN: not llvm-as %t/src_not_ptr.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NOT_PTR %s --implicit-check-not="error:" +define i64 @bad(i32 %p) { + %addr = ptrtoaddr i32 %p to i64 + ; SRC_NOT_PTR: [[#@LINE-1]]:21: error: invalid cast opcode for cast from 'i32' to 'i64' + ret i64 %addr +} + +;--- src_not_ptr_vec.ll +; RUN: not llvm-as %t/src_not_ptr_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NOT_PTR_VEC %s --implicit-check-not="error:" +define <2 x i64> @bad(<2 x i32> %p) { + %addr = ptrtoaddr <2 x i32> %p to <2 x i64> + ; SRC_NOT_PTR_VEC: [[#@LINE-1]]:21: error: invalid cast opcode for cast from '<2 x i32>' to '<2 x i64>' + ret <2 x i64> %addr +} + +;--- vec_src_fewer_elems.ll +; RUN: not llvm-as %t/vec_src_fewer_elems.ll -o /dev/null 2>&1 | FileCheck -check-prefix=VEC_SRC_FEWER_ELEMS %s --implicit-check-not="error:" +define <4 x i64> @bad(<2 x ptr> %p) { + %addr = ptrtoaddr <2 x ptr> %p to <4 x i64> + ; VEC_SRC_FEWER_ELEMS: [[#@LINE-1]]:21: error: invalid cast opcode for cast from '<2 x ptr>' to '<4 x i64>' + ret <4 x i64> %addr +} + +;--- vec_dst_fewer_elems.ll +; RUN: not llvm-as %t/vec_dst_fewer_elems.ll -o /dev/null 2>&1 | FileCheck 
-check-prefix=VEC_DST_FEWER_ELEMS %s --implicit-check-not="error:" +define <2 x i64> @bad(<4 x ptr> %p) { + %addr = ptrtoaddr <4 x ptr> %p to <2 x i64> + ; VEC_DST_FEWER_ELEMS: [[#@LINE-1]]:21: error: invalid cast opcode for cast from '<4 x ptr>' to '<2 x i64>' + ret <2 x i64> %addr +} + +;--- dst_not_addr_size.ll +; The following invalid IR is caught by the verifier, not the parser: +; RUN: llvm-as %t/dst_not_addr_size.ll --disable-output --disable-verify +; RUN: not llvm-as %t/dst_not_addr_size.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_ADDR_SIZE %s --implicit-check-not="error:" +; DST_NOT_ADDR_SIZE: assembly parsed, but does not verify as correct! +define i32 @bad(ptr %p) { + %addr = ptrtoaddr ptr %p to i32 + ; DST_NOT_ADDR_SIZE: PtrToAddr result must be address width + ret i32 %addr +} +define <4 x i32> @bad_vec(<4 x ptr> %p) { + %addr = ptrtoaddr <4 x ptr> %p to <4 x i32> + ; DST_NOT_ADDR_SIZE: PtrToAddr result must be address width + ret <4 x i32> %addr +} diff --git a/llvm/test/Assembler/ptrtoaddr.ll b/llvm/test/Assembler/ptrtoaddr.ll new file mode 100644 index 0000000..f21410b --- /dev/null +++ b/llvm/test/Assembler/ptrtoaddr.ll @@ -0,0 +1,27 @@ +; RUN: llvm-as < %s | llvm-dis | FileCheck %s +target datalayout = "p1:64:64:64:32" + +@i_as0 = global i32 0 +@global_cast_as0 = global i64 ptrtoaddr (ptr @i_as0 to i64) +; CHECK: @global_cast_as0 = global i64 ptrtoaddr (ptr @i_as0 to i64) +@i_as1 = addrspace(1) global i32 0 +@global_cast_as1 = global i32 ptrtoaddr (ptr addrspace(1) @i_as1 to i32) +; CHECK: @global_cast_as1 = global i32 ptrtoaddr (ptr addrspace(1) @i_as1 to i32) + +define i64 @test_as0(ptr %p) { + %addr = ptrtoaddr ptr %p to i64 + ; CHECK: %addr = ptrtoaddr ptr %p to i64 + ret i64 %addr +} + +define i32 @test_as1(ptr addrspace(1) %p) { + %addr = ptrtoaddr ptr addrspace(1) %p to i32 + ; CHECK: %addr = ptrtoaddr ptr addrspace(1) %p to i32 + ret i32 %addr +} + +define <2 x i32> @test_vec_as1(<2 x ptr addrspace(1)> %p) { + %addr = ptrtoaddr <2 x ptr addrspace(1)> %p to <2 x i32> + ; CHECK: %addr = ptrtoaddr <2 x ptr addrspace(1)> %p to <2 x i32> + ret <2 x i32> %addr +} diff --git a/llvm/test/Bitcode/ptrtoaddr.ll b/llvm/test/Bitcode/ptrtoaddr.ll new file mode 100644 index 0000000..6c5fed2 --- /dev/null +++ b/llvm/test/Bitcode/ptrtoaddr.ll @@ -0,0 +1,27 @@ +; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s +target datalayout = "p1:64:64:64:32" + +@i_as0 = global i32 0 +@global_cast_as0 = global i64 ptrtoaddr (ptr @i_as0 to i64) +; CHECK: @global_cast_as0 = global i64 ptrtoaddr (ptr @i_as0 to i64) +@i_as1 = addrspace(1) global i32 0 +@global_cast_as1 = global i32 ptrtoaddr (ptr addrspace(1) @i_as1 to i32) +; CHECK: @global_cast_as1 = global i32 ptrtoaddr (ptr addrspace(1) @i_as1 to i32) + +define i64 @test_as0(ptr %p) { + %addr = ptrtoaddr ptr %p to i64 + ; CHECK: %addr = ptrtoaddr ptr %p to i64 + ret i64 %addr +} + +define i32 @test_as1(ptr addrspace(1) %p) { + %addr = ptrtoaddr ptr addrspace(1) %p to i32 + ; CHECK: %addr = ptrtoaddr ptr addrspace(1) %p to i32 + ret i32 %addr +} + +define <2 x i32> @test_vec_as1(<2 x ptr addrspace(1)> %p) { + %addr = ptrtoaddr <2 x ptr addrspace(1)> %p to <2 x i32> + ; CHECK: %addr = ptrtoaddr <2 x ptr addrspace(1)> %p to <2 x i32> + ret <2 x i32> %addr +} diff --git a/llvm/test/CMakeLists.txt b/llvm/test/CMakeLists.txt index 3042b8f..b46f482 100644 --- a/llvm/test/CMakeLists.txt +++ b/llvm/test/CMakeLists.txt @@ -30,7 +30,6 @@ llvm_canonicalize_cmake_booleans( LLVM_INCLUDE_SPIRV_TOOLS_TESTS LLVM_APPEND_VC_REV 
LLVM_HAS_LOGF128 - LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS ) configure_lit_site_cfg( diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir index 437a9e6..3f14162 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir @@ -10,7 +10,7 @@ define i32 @va_start(ptr %a, ...) { entry: %ap = alloca %struct.__va_list, align 8 - call void @llvm.lifetime.start.p0(i64 32, ptr %ap) + call void @llvm.lifetime.start.p0(ptr %ap) call void @llvm.va_start.p0(ptr %ap) %vr_offs_p = getelementptr inbounds i8, ptr %ap, i64 28 %vr_offs = load i32, ptr %vr_offs_p, align 4 diff --git a/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll b/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll new file mode 100644 index 0000000..da04c67 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll @@ -0,0 +1,70 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mattr=+sve2 -verify-machineinstrs < %s -o - | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; This test is reduced from a real world example that would cause the DAGCombiner to hang. + +define void @histcnt_loop(ptr %0, i64 %1, ptr %2, i64 %3, i64 %4) { +; CHECK-LABEL: histcnt_loop: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov z0.d, #1 // =0x1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mov x8, xzr +; CHECK-NEXT: add x9, x0, x1 +; CHECK-NEXT: .LBB0_1: // %loop +; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, x8, lsl #1] +; CHECK-NEXT: lsl x10, x8, #1 +; CHECK-NEXT: add x11, x0, x10 +; CHECK-NEXT: add x10, x9, x10 +; CHECK-NEXT: lsl z1.d, z1.d, #1 +; CHECK-NEXT: ld1h { z4.d }, p0/z, [x11, #1, mul vl] +; CHECK-NEXT: ld1h { z5.d }, p0/z, [x10, #1, mul vl] +; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d +; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d] +; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d +; CHECK-NEXT: ld1h { z3.d }, p0/z, [x9, x8, lsl #1] +; CHECK-NEXT: add x8, x8, x3 +; CHECK-NEXT: cmp x4, x8 +; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d] +; CHECK-NEXT: lsl z1.d, z4.d, #1 +; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d +; CHECK-NEXT: ld1h { z4.d }, p0/z, [x2, z1.d] +; CHECK-NEXT: mad z2.d, p0/m, z0.d, z4.d +; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d] +; CHECK-NEXT: lsl z1.d, z3.d, #1 +; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d +; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d] +; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d +; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d] +; CHECK-NEXT: lsl z1.d, z5.d, #1 +; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d +; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d] +; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d +; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d] +; CHECK-NEXT: b.ne .LBB0_1 +; CHECK-NEXT: // %bb.2: // %exit +; CHECK-NEXT: ret +entry: + br label %loop + +loop: + %6 = phi i64 [ 0, %entry ], [ %15, %loop ] + %7 = getelementptr inbounds nuw i16, ptr %0, i64 %6 + %8 = getelementptr inbounds nuw i8, ptr %7, i64 %1 + %9 = load <vscale x 4 x i16>, ptr %7, align 2 + %10 = load <vscale x 4 x i16>, ptr %8, align 2 + %11 = zext <vscale x 4 x i16> %9 to <vscale x 4 x i64> + %12 = zext <vscale x 4 x i16> %10 to <vscale x 4 x i64> + %13 = getelementptr inbounds nuw [16 x i16], ptr %2, i64 0, <vscale x 4 x i64> %11 + %14 = getelementptr inbounds nuw [16 x i16], ptr %2, i64 0, <vscale x 4 x i64> %12 + call void @llvm.experimental.vector.histogram.add.nxv4p0.i16(<vscale x 4 x ptr> %13, 
i16 1, <vscale x 4 x i1> splat (i1 true)) + call void @llvm.experimental.vector.histogram.add.nxv4p0.i16(<vscale x 4 x ptr> %14, i16 1, <vscale x 4 x i1> splat (i1 true)) + %15 = add nuw i64 %6, %3 + %16 = icmp eq i64 %15, %4 + br i1 %16, label %exit, label %loop + +exit: + ret void +} diff --git a/llvm/test/CodeGen/AArch64/abd-combine.ll b/llvm/test/CodeGen/AArch64/abd-combine.ll index d025789..cdb40ce 100644 --- a/llvm/test/CodeGen/AArch64/abd-combine.ll +++ b/llvm/test/CodeGen/AArch64/abd-combine.ll @@ -17,12 +17,9 @@ define <8 x i16> @abdu_base(<8 x i16> %src1, <8 x i16> %src2) { define <8 x i16> @abdu_const(<8 x i16> %src1) { ; CHECK-LABEL: abdu_const: ; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.4s, #1 -; CHECK-NEXT: ushll v2.4s, v0.4h, #0 -; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0 -; CHECK-NEXT: uabd v0.4s, v0.4s, v1.4s -; CHECK-NEXT: uabd v1.4s, v2.4s, v1.4s -; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-NEXT: movi v1.4h, #1 +; CHECK-NEXT: mov v1.d[1], v1.d[0] +; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h ; CHECK-NEXT: ret %zextsrc1 = zext <8 x i16> %src1 to <8 x i32> %sub = sub <8 x i32> %zextsrc1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> @@ -34,12 +31,9 @@ define <8 x i16> @abdu_const(<8 x i16> %src1) { define <8 x i16> @abdu_const_lhs(<8 x i16> %src1) { ; CHECK-LABEL: abdu_const_lhs: ; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.4s, #1 -; CHECK-NEXT: ushll v2.4s, v0.4h, #0 -; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0 -; CHECK-NEXT: uabd v0.4s, v0.4s, v1.4s -; CHECK-NEXT: uabd v1.4s, v2.4s, v1.4s -; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-NEXT: movi v1.4h, #1 +; CHECK-NEXT: mov v1.d[1], v1.d[0] +; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h ; CHECK-NEXT: ret %zextsrc1 = zext <8 x i16> %src1 to <8 x i32> %sub = sub <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %zextsrc1 @@ -318,12 +312,9 @@ define <8 x i16> @abds_base(<8 x i16> %src1, <8 x i16> %src2) { define <8 x i16> @abds_const(<8 x i16> %src1) { ; CHECK-LABEL: abds_const: ; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.4s, #1 -; CHECK-NEXT: sshll v2.4s, v0.4h, #0 -; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0 -; CHECK-NEXT: sabd v0.4s, v0.4s, v1.4s -; CHECK-NEXT: sabd v1.4s, v2.4s, v1.4s -; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-NEXT: movi v1.4h, #1 +; CHECK-NEXT: mov v1.d[1], v1.d[0] +; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h ; CHECK-NEXT: ret %zextsrc1 = sext <8 x i16> %src1 to <8 x i32> %sub = sub <8 x i32> %zextsrc1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> @@ -335,12 +326,9 @@ define <8 x i16> @abds_const(<8 x i16> %src1) { define <8 x i16> @abds_const_lhs(<8 x i16> %src1) { ; CHECK-LABEL: abds_const_lhs: ; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.4s, #1 -; CHECK-NEXT: sshll v2.4s, v0.4h, #0 -; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0 -; CHECK-NEXT: sabd v0.4s, v0.4s, v1.4s -; CHECK-NEXT: sabd v1.4s, v2.4s, v1.4s -; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-NEXT: movi v1.4h, #1 +; CHECK-NEXT: mov v1.d[1], v1.d[0] +; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h ; CHECK-NEXT: ret %zextsrc1 = sext <8 x i16> %src1 to <8 x i32> %sub = sub <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %zextsrc1 @@ -352,11 +340,10 @@ define <8 x i16> @abds_const_lhs(<8 x i16> %src1) { define <8 x i16> @abds_const_zero(<8 x i16> %src1) { ; CHECK-LABEL: abds_const_zero: ; CHECK: // %bb.0: -; CHECK-NEXT: sshll v1.4s, v0.4h, #0 -; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0 -; CHECK-NEXT: abs v0.4s, v0.4s -; CHECK-NEXT: abs v1.4s, v1.4s -; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 
+; CHECK-NEXT: abs v0.4h, v0.4h +; CHECK-NEXT: abs v1.4h, v1.4h +; CHECK-NEXT: mov v0.d[1], v1.d[0] ; CHECK-NEXT: ret %zextsrc1 = sext <8 x i16> %src1 to <8 x i32> %sub = sub <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, %zextsrc1 diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll index 256ff94..9a1b6a0 100644 --- a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll +++ b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll @@ -70,6 +70,23 @@ entry: ret <2 x i64> %add.i } +define void @test_commutable_vaddl_s8(<8 x i8> %a, <8 x i8> %b, ptr %c) { +; CHECK-LABEL: test_commutable_vaddl_s8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: saddl v0.8h, v0.8b, v1.8b +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret +entry: + %vmovl.i.i = sext <8 x i8> %a to <8 x i16> + %vmovl.i2.i = sext <8 x i8> %b to <8 x i16> + %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i + store <8 x i16> %add.i, ptr %c + %add.i2 = add <8 x i16> %vmovl.i2.i, %vmovl.i.i + %c.gep.1 = getelementptr i8, ptr %c, i64 16 + store <8 x i16> %add.i2, ptr %c.gep.1 + ret void +} + define <8 x i16> @test_vaddl_u8(<8 x i8> %a, <8 x i8> %b) { ; CHECK-LABEL: test_vaddl_u8: ; CHECK: // %bb.0: // %entry @@ -106,6 +123,23 @@ entry: ret <2 x i64> %add.i } +define void @test_commutable_vaddl_u8(<8 x i8> %a, <8 x i8> %b, ptr %c) { +; CHECK-LABEL: test_commutable_vaddl_u8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: uaddl v0.8h, v0.8b, v1.8b +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret +entry: + %vmovl.i.i = zext <8 x i8> %a to <8 x i16> + %vmovl.i2.i = zext <8 x i8> %b to <8 x i16> + %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i + store <8 x i16> %add.i, ptr %c + %add.i2 = add <8 x i16> %vmovl.i2.i, %vmovl.i.i + %c.gep.1 = getelementptr i8, ptr %c, i64 16 + store <8 x i16> %add.i2, ptr %c.gep.1 + ret void +} + define <8 x i16> @test_vaddl_a8(<8 x i8> %a, <8 x i8> %b) { ; CHECK-SD-LABEL: test_vaddl_a8: ; CHECK-SD: // %bb.0: // %entry @@ -2892,9 +2926,9 @@ define <8 x i16> @cmplx_mul_combined_re_im(<8 x i16> noundef %a, i64 %scale.coer ; CHECK-GI-LABEL: cmplx_mul_combined_re_im: ; CHECK-GI: // %bb.0: // %entry ; CHECK-GI-NEXT: lsr x9, x0, #16 -; CHECK-GI-NEXT: adrp x8, .LCPI196_0 +; CHECK-GI-NEXT: adrp x8, .LCPI198_0 ; CHECK-GI-NEXT: rev32 v4.8h, v0.8h -; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI196_0] +; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI198_0] ; CHECK-GI-NEXT: fmov d1, x9 ; CHECK-GI-NEXT: dup v2.8h, v1.h[0] ; CHECK-GI-NEXT: sqneg v1.8h, v2.8h diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll b/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll index 6c7ddd9..ccd1917 100644 --- a/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll +++ b/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll @@ -575,3 +575,69 @@ define <4 x i32> @knownbits_sabd_and_mul_mask(<4 x i32> %a0, <4 x i32> %a1) { %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3> ret <4 x i32> %6 } + +define <4 x i16> @trunc_abdu_foldable(<4 x i16> %a, <4 x i16> %b) { +; CHECK-SD-LABEL: trunc_abdu_foldable: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: uabd v0.4h, v0.4h, v1.4h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: trunc_abdu_foldable: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 +; CHECK-GI-NEXT: uabd v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-NEXT: ret + %ext_a = zext <4 x i16> %a to <4 x i32> + %ext_b = zext <4 x i16> %b to <4 x i32> + %abd = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x 
i32> %ext_a, <4 x i32> %ext_b) + %trunc = trunc <4 x i32> %abd to <4 x i16> + ret <4 x i16> %trunc +} + +define <4 x i16> @trunc_abds_foldable(<4 x i16> %a, <4 x i16> %b) { +; CHECK-SD-LABEL: trunc_abds_foldable: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sabd v0.4h, v0.4h, v1.4h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: trunc_abds_foldable: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: sshll v1.4s, v1.4h, #0 +; CHECK-GI-NEXT: sabd v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-NEXT: ret + %a32 = sext <4 x i16> %a to <4 x i32> + %b32 = sext <4 x i16> %b to <4 x i32> + %abd32 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %a32, <4 x i32> %b32) + %res16 = trunc <4 x i32> %abd32 to <4 x i16> + ret <4 x i16> %res16 +} + +define <4 x i16> @trunc_abdu_not_foldable(<4 x i16> %a, <4 x i32> %b) { +; CHECK-LABEL: trunc_abdu_not_foldable: +; CHECK: // %bb.0: +; CHECK-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-NEXT: uabd v0.4s, v0.4s, v1.4s +; CHECK-NEXT: xtn v0.4h, v0.4s +; CHECK-NEXT: ret + %ext_a = zext <4 x i16> %a to <4 x i32> + %abd = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %ext_a, <4 x i32> %b) + %trunc = trunc <4 x i32> %abd to <4 x i16> + ret <4 x i16> %trunc +} + +define <4 x i16> @truncate_abds_testcase1(<4 x i16> %a, <4 x i32> %b) { +; CHECK-LABEL: truncate_abds_testcase1: +; CHECK: // %bb.0: +; CHECK-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-NEXT: sabd v0.4s, v0.4s, v1.4s +; CHECK-NEXT: xtn v0.4h, v0.4s +; CHECK-NEXT: ret + %a32 = sext <4 x i16> %a to <4 x i32> + %abd32 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %a32, <4 x i32> %b) + %res16 = trunc <4 x i32> %abd32 to <4 x i16> + ret <4 x i16> %res16 +} diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll index ecf3f69..0d427c0 100644 --- a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll +++ b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll @@ -1608,6 +1608,18 @@ define <16 x i8> @poly_mulv16i8(<16 x i8> %lhs, <16 x i8> %rhs) { ret <16 x i8> %prod } +define <16 x i8> @commutable_poly_mul(<16 x i8> %lhs, <16 x i8> %rhs) { +; CHECK-LABEL: commutable_poly_mul: +; CHECK: // %bb.0: +; CHECK-NEXT: pmul v0.16b, v0.16b, v1.16b +; CHECK-NEXT: add v0.16b, v0.16b, v0.16b +; CHECK-NEXT: ret + %1 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %lhs, <16 x i8> %rhs) + %2 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %rhs, <16 x i8> %lhs) + %3 = add <16 x i8> %1, %2 + ret <16 x i8> %3 +} + declare <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16>, <4 x i16>) declare <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16>, <8 x i16>) declare <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>) diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll index 78881c8..ede5a7c 100644 --- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll +++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll @@ -44,6 +44,35 @@ define <2 x i64> @sabdl2d(ptr %A, ptr %B) nounwind { ret <2 x i64> %tmp4 } +define void @commutable_sabdl(ptr %A, ptr %B, ptr %C) nounwind { +; CHECK-SD-LABEL: commutable_sabdl: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr d0, [x0] +; CHECK-SD-NEXT: ldr d1, [x1] +; CHECK-SD-NEXT: sabdl.8h v0, v1, v0 +; CHECK-SD-NEXT: str q0, [x2] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commutable_sabdl: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr d0, [x0] +; CHECK-GI-NEXT: ldr d1, [x1] +; CHECK-GI-NEXT: sabdl.8h v0, v0, v1 +; CHECK-GI-NEXT: str q0, [x2] +; CHECK-GI-NEXT: 
str q0, [x2] +; CHECK-GI-NEXT: ret + %tmp1 = load <8 x i8>, ptr %A + %tmp2 = load <8 x i8>, ptr %B + %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2) + %tmp4 = zext <8 x i8> %tmp3 to <8 x i16> + store <8 x i16> %tmp4, ptr %C + %tmp5 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp1) + %tmp6 = zext <8 x i8> %tmp5 to <8 x i16> + %tmp7 = getelementptr i8, ptr %C, i64 16 + store <8 x i16> %tmp6, ptr %C + ret void +} + define <8 x i16> @sabdl2_8h(ptr %A, ptr %B) nounwind { ; CHECK-SD-LABEL: sabdl2_8h: ; CHECK-SD: // %bb.0: @@ -155,6 +184,35 @@ define <2 x i64> @uabdl2d(ptr %A, ptr %B) nounwind { ret <2 x i64> %tmp4 } +define void @commutable_uabdl(ptr %A, ptr %B, ptr %C) nounwind { +; CHECK-SD-LABEL: commutable_uabdl: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr d0, [x0] +; CHECK-SD-NEXT: ldr d1, [x1] +; CHECK-SD-NEXT: uabdl.8h v0, v1, v0 +; CHECK-SD-NEXT: str q0, [x2] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commutable_uabdl: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr d0, [x0] +; CHECK-GI-NEXT: ldr d1, [x1] +; CHECK-GI-NEXT: uabdl.8h v0, v0, v1 +; CHECK-GI-NEXT: str q0, [x2] +; CHECK-GI-NEXT: str q0, [x2] +; CHECK-GI-NEXT: ret + %tmp1 = load <8 x i8>, ptr %A + %tmp2 = load <8 x i8>, ptr %B + %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2) + %tmp4 = zext <8 x i8> %tmp3 to <8 x i16> + store <8 x i16> %tmp4, ptr %C + %tmp5 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp1) + %tmp6 = zext <8 x i8> %tmp5 to <8 x i16> + %tmp7 = getelementptr i8, ptr %C, i64 16 + store <8 x i16> %tmp6, ptr %C + ret void +} + define <8 x i16> @uabdl2_8h(ptr %A, ptr %B) nounwind { ; CHECK-SD-LABEL: uabdl2_8h: ; CHECK-SD: // %bb.0: diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll index 07400bb..d12f7ce 100644 --- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll +++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll @@ -3,6 +3,7 @@ ; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes -global-isel -global-isel-abort=2 2>&1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI ; CHECK-GI: warning: Instruction selection used fallback path for pmull8h +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for commutable_pmull8h ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmulh_1s ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_2s ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_4s @@ -78,6 +79,20 @@ define <2 x i64> @smull2d(ptr %A, ptr %B) nounwind { ret <2 x i64> %tmp3 } +define void @commutable_smull(<2 x i32> %A, <2 x i32> %B, ptr %C) { +; CHECK-LABEL: commutable_smull: +; CHECK: // %bb.0: +; CHECK-NEXT: smull v0.2d, v0.2s, v1.2s +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %1 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %B) + %2 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %B, <2 x i32> %A) + store <2 x i64> %1, ptr %C + %3 = getelementptr i8, ptr %C, i64 16 + store <2 x i64> %2, ptr %3 + ret void +} + declare <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone @@ -121,6 +136,20 @@ define <2 x i64> @umull2d(ptr %A, ptr %B) nounwind { ret <2 x i64> %tmp3 } +define void @commutable_umull(<2 x i32> %A, <2 x i32> %B, ptr %C) { +; 
CHECK-LABEL: commutable_umull: +; CHECK: // %bb.0: +; CHECK-NEXT: umull v0.2d, v0.2s, v1.2s +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %1 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %B) + %2 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %B, <2 x i32> %A) + store <2 x i64> %1, ptr %C + %3 = getelementptr i8, ptr %C, i64 16 + store <2 x i64> %2, ptr %3 + ret void +} + declare <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone @@ -212,6 +241,20 @@ define <8 x i16> @pmull8h(ptr %A, ptr %B) nounwind { ret <8 x i16> %tmp3 } +define void @commutable_pmull8h(<8 x i8> %A, <8 x i8> %B, ptr %C) { +; CHECK-LABEL: commutable_pmull8h: +; CHECK: // %bb.0: +; CHECK-NEXT: pmull v0.8h, v0.8b, v1.8b +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %1 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %A, <8 x i8> %B) + %2 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %B, <8 x i8> %A) + store <8 x i16> %1, ptr %C + %3 = getelementptr i8, ptr %C, i8 16 + store <8 x i16> %2, ptr %3 + ret void +} + declare <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone define <4 x i16> @sqdmulh_4h(ptr %A, ptr %B) nounwind { @@ -487,10 +530,10 @@ define void @smlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, ; CHECK-GI-LABEL: smlal2d_chain_with_constant: ; CHECK-GI: // %bb.0: ; CHECK-GI-NEXT: mvn v3.8b, v2.8b -; CHECK-GI-NEXT: adrp x8, .LCPI27_0 +; CHECK-GI-NEXT: adrp x8, .LCPI30_0 ; CHECK-GI-NEXT: smull v1.2d, v1.2s, v3.2s ; CHECK-GI-NEXT: smlal v1.2d, v0.2s, v2.2s -; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI27_0] +; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI30_0] ; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d ; CHECK-GI-NEXT: str q0, [x0] ; CHECK-GI-NEXT: ret @@ -566,8 +609,8 @@ define void @smlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, ; ; CHECK-GI-LABEL: smlsl2d_chain_with_constant: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: adrp x8, .LCPI31_0 -; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI31_0] +; CHECK-GI-NEXT: adrp x8, .LCPI34_0 +; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI34_0] ; CHECK-GI-NEXT: smlsl v3.2d, v0.2s, v2.2s ; CHECK-GI-NEXT: mvn v0.8b, v2.8b ; CHECK-GI-NEXT: smlsl v3.2d, v1.2s, v0.2s @@ -829,10 +872,10 @@ define void @umlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, ; CHECK-GI-LABEL: umlal2d_chain_with_constant: ; CHECK-GI: // %bb.0: ; CHECK-GI-NEXT: mvn v3.8b, v2.8b -; CHECK-GI-NEXT: adrp x8, .LCPI43_0 +; CHECK-GI-NEXT: adrp x8, .LCPI46_0 ; CHECK-GI-NEXT: umull v1.2d, v1.2s, v3.2s ; CHECK-GI-NEXT: umlal v1.2d, v0.2s, v2.2s -; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI43_0] +; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI46_0] ; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d ; CHECK-GI-NEXT: str q0, [x0] ; CHECK-GI-NEXT: ret @@ -908,8 +951,8 @@ define void @umlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, ; ; CHECK-GI-LABEL: umlsl2d_chain_with_constant: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: adrp x8, .LCPI47_0 -; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI47_0] +; CHECK-GI-NEXT: adrp x8, .LCPI50_0 +; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI50_0] ; CHECK-GI-NEXT: umlsl v3.2d, v0.2s, v2.2s ; CHECK-GI-NEXT: mvn v0.8b, v2.8b ; CHECK-GI-NEXT: umlsl v3.2d, v1.2s, v0.2s @@ -3222,6 +3265,20 @@ define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind { ret <16 x 
i8> %val } +define <16 x i8> @test_commutable_pmull_64(i64 %l, i64 %r) nounwind { +; CHECK-LABEL: test_commutable_pmull_64: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov d0, x1 +; CHECK-NEXT: fmov d1, x0 +; CHECK-NEXT: pmull v0.1q, v1.1d, v0.1d +; CHECK-NEXT: add v0.16b, v0.16b, v0.16b +; CHECK-NEXT: ret + %1 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %l, i64 %r) + %2 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %r, i64 %l) + %3 = add <16 x i8> %1, %2 + ret <16 x i8> %3 +} + declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64) define <1 x i64> @test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind { diff --git a/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir b/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir index 23ac67c..805d244 100644 --- a/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir +++ b/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir @@ -96,3 +96,23 @@ body: | $q25 = ORRv16i8 $q3, killed $q3 RET_ReallyLR implicit $q22 ... +--- +name: DoubleOp +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $q2 + + ; CHECK-LABEL: name: DoubleOp + ; CHECK: liveins: $q2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $q0 = MOVIv8i16 1, 0 + ; CHECK-NEXT: renamable $q1 = ORRv16i8 renamable $q2, renamable $q2 + ; CHECK-NEXT: renamable $q1 = BSLv16i8 killed renamable $q1, renamable $q2, renamable $q0 + ; CHECK-NEXT: renamable $q0 = SQADDv8i16 killed renamable $q1, killed renamable $q0 + ; CHECK-NEXT: RET undef $lr, implicit $q0 + renamable $q0 = MOVIv8i16 1, 0 + renamable $q1 = BSPv16i8 killed renamable $q2, renamable $q2, renamable $q0 + renamable $q0 = SQADDv8i16 killed renamable $q1, killed renamable $q0 + RET_ReallyLR implicit $q0 +... diff --git a/llvm/test/CodeGen/AArch64/lifetime-poison.ll b/llvm/test/CodeGen/AArch64/lifetime-poison.ll index e04530d..dfb76d1 100644 --- a/llvm/test/CodeGen/AArch64/lifetime-poison.ll +++ b/llvm/test/CodeGen/AArch64/lifetime-poison.ll @@ -8,7 +8,7 @@ define void @test() { ; CHECK-LABEL: test: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - call void @llvm.lifetime.start.p0(i64 4, ptr poison) - call void @llvm.lifetime.end.p0(i64 4, ptr poison) + call void @llvm.lifetime.start.p0(ptr poison) + call void @llvm.lifetime.end.p0(ptr poison) ret void } diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll index 0711f69..df83762 100644 --- a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll +++ b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll @@ -5,8 +5,8 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "aarch64--linux-android" declare void @use(ptr) -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) define void @OneVarNoInit() sanitize_memtag { @@ -16,18 +16,18 @@ define void @OneVarNoInit() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[TX]], i64 16) ; CHECK-NEXT: call void @use(ptr nonnull 
[[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -39,19 +39,19 @@ define void @OneVarInitConst() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 42, i64 0) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store i32 42, ptr %x, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -64,21 +64,21 @@ define void @ArrayInitConst() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 42, i64 0) ; CHECK-NEXT: [[TX8_16:%.*]] = getelementptr i8, ptr [[TX]], i32 16 ; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_16]], i64 48) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 16, align 4 - call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store i32 42, ptr %x, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -90,7 +90,7 @@ define void @ArrayInitConst2() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 2 ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 184683593770, i64 -1) @@ -98,19 +98,19 @@ define 
void @ArrayInitConst2() sanitize_memtag { ; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_16]], i64 48) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 16, align 4 - call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store i32 42, ptr %x, align 4 %0 = getelementptr i32, ptr %x, i32 1 store i32 43, ptr %0, align 4 %1 = getelementptr i32, ptr %x, i32 2 store i64 -1, ptr %1, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -122,23 +122,23 @@ define void @ArrayInitConstSplit() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1 ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 -4294967296, i64 4294967295) ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TX]], i32 16 ; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TMP1]], i64 48) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 16, align 4 - call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) %0 = getelementptr i32, ptr %x, i32 1 store i64 -1, ptr %0, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -150,7 +150,7 @@ define void @ArrayInitConstWithHoles() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 32, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 5 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 14 ; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX]], i64 16) @@ -164,18 +164,18 @@ define void @ArrayInitConstWithHoles() sanitize_memtag { ; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_64]], i64 64) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 128) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 32, align 4 - call void @llvm.lifetime.start.p0(i64 128, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) %0 = getelementptr i32, ptr %x, i32 5 store i32 42, ptr %0, align 4 %1 = getelementptr 
i32, ptr %x, i32 14 store i32 43, ptr %1, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 128, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -187,20 +187,20 @@ define void @InitNonConst(i32 %v) sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16 ; CHECK-NEXT: [[X_TAG:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[V]] to i64 ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[X_TAG]], i64 [[TMP0]], i64 0) ; CHECK-NEXT: call void @use(ptr nonnull [[X_TAG]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store i32 %v, ptr %x, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -212,7 +212,7 @@ define void @InitNonConst2(i32 %v, i32 %w) sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[V]] to i64 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 1 ; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[W]] to i64 @@ -221,17 +221,17 @@ define void @InitNonConst2(i32 %v, i32 %w) sanitize_memtag { ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 [[VW]], i64 0) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 4, align 4 - call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store i32 %v, ptr %x, align 4 %0 = getelementptr i32, ptr %x, i32 1 store i32 %w, ptr %0, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -243,19 +243,19 @@ define void @InitVector() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), i64 0) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]]) +; 
CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 4, align 4 - call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store <2 x i32> <i32 1, i32 2>, ptr %x, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -302,23 +302,23 @@ define void @InitVectorSplit() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), 32 ; CHECK-NEXT: [[LSHR:%.*]] = lshr i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), 32 ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 [[TMP1]], i64 [[LSHR]]) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 4, align 4 - call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) %0 = getelementptr i32, ptr %x, i32 1 store <2 x i32> <i32 1, i32 2>, ptr %0, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir b/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir index 45f6bfe..0fa5103 100644 --- a/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir +++ b/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir @@ -18,15 +18,15 @@ %C.tag = call ptr @llvm.aarch64.tagp.p0(ptr %C, ptr %basetag, i64 1) call void @llvm.aarch64.settag(ptr %C.tag, i64 32) call void @F56(ptr %C.tag) - call void @llvm.lifetime.start.p0(i64 32, ptr %A) + call void @llvm.lifetime.start.p0(ptr %A) call void @llvm.aarch64.settag(ptr %A.tag, i64 32) call void @F56(ptr %A.tag) call void @llvm.aarch64.settag(ptr %A, i64 32) - call void @llvm.lifetime.end.p0(i64 32, ptr %A) - call void @llvm.lifetime.start.p0(i64 32, ptr %A) + call void @llvm.lifetime.end.p0(ptr %A) + call void @llvm.lifetime.start.p0(ptr %A) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %A, ptr align 4 @glob, i64 32, i1 false) call void @F78(ptr %A) - call void @llvm.lifetime.end.p0(i64 32, ptr %A) + call void @llvm.lifetime.end.p0(ptr %A) call void @llvm.aarch64.settag(ptr %C, i64 32) ret void } diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll index aa9cccc..91adf82 100644 --- a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll +++ b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll @@ -17,17 +17,17 @@ S0: S1: ; CHECK-LABEL: S1: - call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %v) #1 + call void @llvm.lifetime.start.p0(ptr nonnull %v) #1 ; CHECK: call void @llvm.aarch64.settag(ptr %v.tag, i64 48) - call void @llvm.lifetime.start.p0(i64 48, ptr 
nonnull %w) #1 + call void @llvm.lifetime.start.p0(ptr nonnull %w) #1 ; CHECK: call void @llvm.aarch64.settag(ptr %w.tag, i64 48) %t1 = call i32 @g1(ptr nonnull %v, ptr nonnull %w) #1 ; CHECK: call i32 @g1 ; CHECK-NOT: settag{{.*}}%v ; CHECK: call void @llvm.aarch64.settag(ptr %w, i64 48) ; CHECK-NOT: settag{{.*}}%v - call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w) #1 -; CHECK: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w) + call void @llvm.lifetime.end.p0(ptr nonnull %w) #1 +; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %w) %b1 = icmp eq i32 %t1, 0 br i1 %b1, label %S2, label %S3 ; CHECK-NOT: settag @@ -40,7 +40,7 @@ S2: S3: ; CHECK-LABEL: S3: - call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %v) #1 + call void @llvm.lifetime.end.p0(ptr nonnull %v) #1 tail call void @z1() #1 br label %exit2 ; CHECK-NOT: settag @@ -73,9 +73,9 @@ declare void @z1() #0 declare void @z2() #0 -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/bf16-math.ll b/llvm/test/CodeGen/AMDGPU/bf16-math.ll index 3a82f84..30a7864 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16-math.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16-math.ll @@ -370,9 +370,6 @@ define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) { ; GCN: ; %bb.0: ; GCN-NEXT: v_exp_bf16_e64 v0, v0 clamp ; GCN-NEXT: ; return to shader part epilog - - - %exp = call bfloat @llvm.exp2.bf16(bfloat %src) %max = call bfloat @llvm.maxnum.bf16(bfloat %exp, bfloat 0.0) %clamp = call bfloat @llvm.minnum.bf16(bfloat %max, bfloat 1.0) @@ -384,9 +381,6 @@ define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloa ; GCN: ; %bb.0: ; GCN-NEXT: v_pk_mul_bf16 v0, v0, v1 clamp ; GCN-NEXT: ; return to shader part epilog - - - %mul = fmul <2 x bfloat> %src0, %src1 %max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %mul, <2 x bfloat> <bfloat 0.0, bfloat 0.0>) %clamp = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %max, <2 x bfloat> <bfloat 1.0, bfloat 1.0>) @@ -400,9 +394,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfl ; GCN-NEXT: v_pk_fma_bf16 v2, v2, v3, v4 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm - - - %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, %c store <2 x bfloat> %add, ptr addrspace(1) %out @@ -415,9 +406,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfl ; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, s1 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm - - - %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, %c store <2 x bfloat> %add, ptr addrspace(1) %out @@ -432,9 +420,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfl ; GCN-NEXT: v_pk_fma_bf16 v2, s0, s1, v2 
; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm - - - %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, %c store <2 x bfloat> %add, ptr addrspace(1) %out @@ -447,9 +432,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfl ; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, 0.5 op_sel_hi:[1,1,0] ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm - - - %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, <bfloat 0.5, bfloat 0.5> store <2 x bfloat> %add, ptr addrspace(1) %out @@ -464,9 +446,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vll(ptr addrspace(1) %out, <2 x bfl ; GCN-NEXT: v_pk_fma_bf16 v2, 0x42c83f80, v2, s0 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm - - - %mul = fmul contract <2 x bfloat> %a, <bfloat 1.0, bfloat 100.0> %add = fadd contract <2 x bfloat> %mul, <bfloat 2.0, bfloat 200.0> store <2 x bfloat> %add, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll index 8f8ea13..505ddc8 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16.ll @@ -24671,7 +24671,6 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) { ret <32 x bfloat> %op } - declare bfloat @llvm.maxnum.bf16(bfloat, bfloat) declare <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat>, <2 x bfloat>) declare <3 x bfloat> @llvm.maxnum.v3bf16(<3 x bfloat>, <3 x bfloat>) @@ -29673,7 +29672,6 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) { ret { bfloat, i16 } %op } - declare bfloat @llvm.log.bf16(bfloat) declare bfloat @llvm.log2.bf16(bfloat) declare bfloat @llvm.log10.bf16(bfloat) diff --git a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll index f58cb84..839d0ba 100644 --- a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll +++ b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll @@ -38,11 +38,11 @@ define hidden void @copy(ptr noundef %va) { ; CHECK-NEXT: %va.addr.ascast = addrspacecast ptr addrspace(5) %va.addr to ptr ; CHECK-NEXT: %cp.ascast = addrspacecast ptr addrspace(5) %cp to ptr ; CHECK-NEXT: store ptr %va, ptr addrspace(5) %va.addr, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %cp) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %cp) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %cp.ascast, ptr %va.addr.ascast, i32 8, i1 false) ; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %cp, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %0) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %cp) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %cp) ; CHECK-NEXT: ret void ; entry: @@ -51,43 +51,43 @@ entry: %va.addr.ascast = addrspacecast ptr addrspace(5) %va.addr to ptr %cp.ascast = addrspacecast ptr addrspace(5) %cp to ptr store ptr %va, ptr addrspace(5) %va.addr, align 8 - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %cp) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %cp) call void @llvm.va_copy.p0(ptr %cp.ascast, ptr nonnull %va.addr.ascast) %0 = load ptr, ptr addrspace(5) %cp, align 8 call void @valist(ptr noundef %0) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %cp) + call void @llvm.lifetime.end.p5(ptr addrspace(5) %cp) ret void } -declare void @llvm.lifetime.start.p5(i64 immarg, ptr addrspace(5) nocapture) +declare void @llvm.lifetime.start.p5(ptr addrspace(5) nocapture) declare void 
@llvm.va_copy.p0(ptr, ptr) declare hidden void @valist(ptr noundef) -declare void @llvm.lifetime.end.p5(i64 immarg, ptr addrspace(5) nocapture) +declare void @llvm.lifetime.end.p5(ptr addrspace(5) nocapture) define hidden void @start_once(...) { ; CHECK-LABEL: define {{[^@]+}}@start_once(ptr %varargs) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %s = alloca ptr, align 8, addrspace(5) ; CHECK-NEXT: %s.ascast = addrspacecast ptr addrspace(5) %s to ptr -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s) ; CHECK-NEXT: store ptr %varargs, ptr %s.ascast, align 8 ; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %0) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s) ; CHECK-NEXT: ret void ; entry: %s = alloca ptr, align 8, addrspace(5) %s.ascast = addrspacecast ptr addrspace(5) %s to ptr - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %s) call void @llvm.va_start.p0(ptr %s.ascast) %0 = load ptr, ptr addrspace(5) %s, align 8 call void @valist(ptr noundef %0) call void @llvm.va_end.p0(ptr %s.ascast) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s) + call void @llvm.lifetime.end.p5(ptr addrspace(5) %s) ret void } @@ -102,16 +102,16 @@ define hidden void @start_twice(...) { ; CHECK-NEXT: %s1 = alloca ptr, align 8, addrspace(5) ; CHECK-NEXT: %s0.ascast = addrspacecast ptr addrspace(5) %s0 to ptr ; CHECK-NEXT: %s1.ascast = addrspacecast ptr addrspace(5) %s1 to ptr -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s0) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s1) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s0) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s1) ; CHECK-NEXT: store ptr %varargs, ptr %s0.ascast, align 8 ; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s0, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %0) ; CHECK-NEXT: store ptr %varargs, ptr %s1.ascast, align 8 ; CHECK-NEXT: %1 = load ptr, ptr addrspace(5) %s1, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s0) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s1) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s0) ; CHECK-NEXT: ret void ; entry: @@ -119,8 +119,8 @@ entry: %s1 = alloca ptr, align 8, addrspace(5) %s0.ascast = addrspacecast ptr addrspace(5) %s0 to ptr %s1.ascast = addrspacecast ptr addrspace(5) %s1 to ptr - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s0) - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s1) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %s0) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %s1) call void @llvm.va_start.p0(ptr %s0.ascast) %0 = load ptr, ptr addrspace(5) %s0, align 8 call void @valist(ptr noundef %0) @@ -129,8 +129,8 @@ entry: %1 = load ptr, ptr addrspace(5) %s1, align 8 call void @valist(ptr noundef %1) call void @llvm.va_end.p0(ptr %s1.ascast) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s1) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s0) + call void @llvm.lifetime.end.p5(ptr addrspace(5) %s1) + call void @llvm.lifetime.end.p5(ptr 
addrspace(5) %s0) ret void } @@ -138,12 +138,12 @@ define hidden void @single_i32(i32 noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_i32(i32 noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -157,12 +157,12 @@ define hidden void @single_double(double noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_double(double noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_double.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store double %x, ptr addrspace(5) %0, align 8 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -174,12 +174,12 @@ define hidden void @single_v4f32(<4 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v4f32(<4 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v4f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 16, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 16, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -191,12 +191,12 @@ define hidden void @single_v8f32(<8 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v8f32(<8 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v8f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; 
CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -208,12 +208,12 @@ define hidden void @single_v16f32(<16 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v16f32(<16 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v16f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 64, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 64, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -225,12 +225,12 @@ define hidden void @single_v32f32(<32 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v32f32(<32 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v32f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 128, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 128, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -242,14 +242,14 @@ define hidden void @i32_double(i32 noundef %x, double noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_double(i32 noundef %x, double noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_double.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store double %y, ptr addrspace(5) %1, align 8 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -261,14 +261,14 @@ define hidden void @double_i32(double noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@double_i32(double noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %double_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw 
%double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store double %x, ptr addrspace(5) %0, align 8 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -286,14 +286,14 @@ define hidden void @i32_libcS(i32 noundef %x, i8 %y.coerce0, i16 %y.coerce1, i32 ; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %y.coerce3, 3 ; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %y.coerce4, 4 ; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %y.coerce5, 5 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %1, align 8 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -317,14 +317,14 @@ define hidden void @libcS_i32(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i64 ; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3 ; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4 ; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %0, align 8 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -342,14 +342,14 @@ define hidden void @i32_v4f32(i32 noundef %x, <4 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v4f32(i32 noundef %x, <4 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v4f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw 
%i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <4 x float> %y, ptr addrspace(5) %1, align 16 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -361,14 +361,14 @@ define hidden void @v4f32_i32(<4 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v4f32_i32(<4 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v4f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -380,14 +380,14 @@ define hidden void @i32_v8f32(i32 noundef %x, <8 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v8f32(i32 noundef %x, <8 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v8f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <8 x float> %y, ptr addrspace(5) %1, align 32 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -399,14 +399,14 @@ define hidden void @v8f32_i32(<8 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v8f32_i32(<8 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v8f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 
0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -418,14 +418,14 @@ define hidden void @i32_v16f32(i32 noundef %x, <16 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v16f32(i32 noundef %x, <16 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v16f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <16 x float> %y, ptr addrspace(5) %1, align 64 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -437,14 +437,14 @@ define hidden void @v16f32_i32(<16 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v16f32_i32(<16 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v16f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -456,14 +456,14 @@ define hidden void @i32_v32f32(i32 noundef %x, <32 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v32f32(i32 noundef %x, <32 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v32f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <32 x float> %y, ptr addrspace(5) %1, align 128 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void 
@llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -475,14 +475,14 @@ define hidden void @v32f32_i32(<32 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v32f32_i32(<32 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v32f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -495,12 +495,12 @@ define hidden void @fptr_single_i32(i32 noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %fptr_single_i32.vararg, align 4, addrspace(5) ; CHECK-NEXT: %0 = load volatile ptr, ptr addrspacecast (ptr addrspace(1) @vararg_ptr to ptr), align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void %0(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -520,12 +520,12 @@ define hidden void @fptr_libcS(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i6 ; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3 ; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4 ; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %1, align 8 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void %0(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll index 91a8446..13ea8b0 100644 --- a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll +++ 
b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll @@ -18,10 +18,9 @@ define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "a ; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x0 ; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x8 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: s_or_b32 s1, s2, s3 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: s_or_b32 s0, s0, s1 -; GFX11-NEXT: v_mov_b32_e32 v2, s0 +; GFX11-NEXT: v_mov_b32_e32 v2, s3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_or3_b32 v2, s2, v2, s0 ; GFX11-NEXT: global_store_b32 v[0:1], v2, off ; GFX11-NEXT: s_endpgm ; @@ -34,14 +33,12 @@ define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "a ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo ; GFX12-NEXT: v_readfirstlane_b32 s0, v2 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1) ; GFX12-NEXT: v_readfirstlane_b32 s1, v3 ; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_or_b32 s0, s0, s1 -; GFX12-NEXT: s_or_b32 s0, s2, s0 -; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX12-NEXT: v_mov_b32_e32 v2, s0 +; GFX12-NEXT: v_or3_b32 v2, v2, s1, s2 ; GFX12-NEXT: global_store_b32 v[0:1], v2, off ; GFX12-NEXT: s_endpgm bb: diff --git a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir index 23412aa..3b3ea3f 100644 --- a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir +++ b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir @@ -347,8 +347,10 @@ body: | ... # User-requested maximum number of VGPRs need to be taken into account by # the scheduler's rematerialization stage. Register usage above that number -# is considered like spill; occupancy is "inadvertently" increased when -# eliminating spill. +# is considered like spill. On unified RF (gfx90a), the requested number is +# understood "per-bank", effectively doubling its value, so no rematerialization +# is necessary. 
+--- name: small_num_vgprs_as_spill tracksRegLiveness: true machineFunctionInfo: @@ -371,36 +373,15 @@ body: | ; GFX908-NEXT: [[V_CVT_I32_F64_e32_10:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 10, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_11:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 11, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_12:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 12, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_16:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 16, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_17:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 17, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_18:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 18, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_19:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 19, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_24:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_25:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_26:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 33, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode ; GFX908-NEXT: {{ $}} ; GFX908-NEXT: bb.1: ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]] ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]] - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_13]], implicit [[V_CVT_I32_F64_e32_14]] - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]], implicit [[V_CVT_I32_F64_e32_16]], implicit [[V_CVT_I32_F64_e32_17]], implicit 
[[V_CVT_I32_F64_e32_18]], implicit [[V_CVT_I32_F64_e32_19]] - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_20]], implicit [[V_CVT_I32_F64_e32_21]], implicit [[V_CVT_I32_F64_e32_22]], implicit [[V_CVT_I32_F64_e32_23]], implicit [[V_CVT_I32_F64_e32_24]] - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_33:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 32, implicit $exec, implicit $mode - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_28]], implicit [[V_CVT_I32_F64_e32_29]], implicit [[V_CVT_I32_F64_e32_30]] - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_31]], implicit [[V_CVT_I32_F64_e32_32]], implicit [[V_CVT_I32_F64_e32_33]], implicit [[V_CVT_I32_F64_e32_27]] + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode + ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_14]], implicit [[V_CVT_I32_F64_e32_15]] + ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_13]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: small_num_vgprs_as_spill @@ -420,36 +401,15 @@ body: | ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_10:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 10, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_11:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 11, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_12:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 12, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_16:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 16, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_17:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 17, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_18:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 18, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_19:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 19, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: 
[[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_24:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_25:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_26:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 33, implicit $exec, implicit $mode + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.1: ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]] ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]] ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_13]], implicit [[V_CVT_I32_F64_e32_14]] - ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]], implicit [[V_CVT_I32_F64_e32_16]], implicit [[V_CVT_I32_F64_e32_17]], implicit [[V_CVT_I32_F64_e32_18]], implicit [[V_CVT_I32_F64_e32_19]] - ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_20]], implicit [[V_CVT_I32_F64_e32_21]], implicit [[V_CVT_I32_F64_e32_22]], implicit [[V_CVT_I32_F64_e32_23]], implicit [[V_CVT_I32_F64_e32_24]] - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode - ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_28]], implicit [[V_CVT_I32_F64_e32_29]], implicit [[V_CVT_I32_F64_e32_30]] - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_33:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 32, implicit $exec, implicit $mode - ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_31]], implicit [[V_CVT_I32_F64_e32_32]], implicit [[V_CVT_I32_F64_e32_33]], implicit [[V_CVT_I32_F64_e32_27]] + ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]] ; GFX90A-NEXT: S_ENDPGM 0 
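# A minimal sketch (illustrative, not part of this test) of how such a cap is
# usually requested: in IR the "amdgpu-num-vgpr" function attribute bounds
# VGPR usage, and register pressure above that bound is what the remat stage
# treats like spill, per the comment above:
#
#   define amdgpu_kernel void @capped_vgprs() "amdgpu-num-vgpr"="16" {
#     ret void
#   }
#
# On gfx90a's unified register file the requested value is read per-bank, so a
# cap of 16 effectively allows 32 registers.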
bb.0: successors: %bb.1 @@ -467,38 +427,16 @@ body: | %10:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 10, implicit $exec, implicit $mode, implicit-def $m0 %11:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 11, implicit $exec, implicit $mode, implicit-def $m0 %12:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 12, implicit $exec, implicit $mode, implicit-def $m0 - %13:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode, implicit-def $m0 - %14:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode, implicit-def $m0 - %15:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode, implicit-def $m0 - %16:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 16, implicit $exec, implicit $mode, implicit-def $m0 - %17:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 17, implicit $exec, implicit $mode, implicit-def $m0 - %18:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 18, implicit $exec, implicit $mode, implicit-def $m0 - %19:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 19, implicit $exec, implicit $mode, implicit-def $m0 - %20:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0 - %21:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0 - %22:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0 - %23:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode, implicit-def $m0 - %24:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0 - %25:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0 - %26:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0 - %27:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode - %28:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode - %29:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode - %30:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode - %31:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode - %32:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 32, implicit $exec, implicit $mode - %33:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 33, implicit $exec, implicit $mode + %13:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode + %14:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode + %15:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode bb.1: S_NOP 0, implicit %0, implicit %1, implicit %2, implicit %3, implicit %4 S_NOP 0, implicit %5, implicit %6, implicit %7, implicit %8, implicit %9 S_NOP 0, implicit %10, implicit %11, implicit %12, implicit %13, implicit %14 - S_NOP 0, implicit %15, implicit %16, implicit %17, implicit %18, implicit %19 - S_NOP 0, implicit %20, implicit %21, implicit %22, implicit %23, implicit %24 - S_NOP 0, implicit %25, implicit %26, implicit %27, implicit %28, implicit %29 - S_NOP 0, implicit %30, implicit %31, implicit %32, implicit %33 - + S_NOP 0, implicit %15 S_ENDPGM 0 ... # Min/Max occupancy is 8, but user requests 7, the scheduler's rematerialization @@ -815,9 +753,9 @@ body: | S_ENDPGM 0 ... # Min/Max waves/EU is 8. For targets with non-unified RF (gfx908) we are able to -# eliminate both ArchVGPR and AGPR spilling by saving 2 VGPRs. In the unified RF -# case (gfx90a) the ArchVGPR allocation granule forces us to remat more -# ArchVGPRs to eliminate spilling. +# eliminate both ArchVGPR and AGPR spilling by saving one of each. 
In the +# unified RF case (gfx90a) the ArchVGPR allocation granule may force us to remat +# more ArchVGPRs to eliminate spilling. --- name: reduce_arch_and_acc_vgrp_spill tracksRegLiveness: true @@ -860,6 +798,7 @@ body: | ; GFX908-NEXT: [[DEF28:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: [[DEF29:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: [[DEF30:%[0-9]+]]:agpr_32 = IMPLICIT_DEF + ; GFX908-NEXT: [[DEF31:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 1, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_2:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 2, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_3:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 3, implicit $exec, implicit $mode, implicit-def $m0 @@ -886,12 +825,11 @@ body: | ; GFX908-NEXT: [[V_CVT_I32_F64_e32_24:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_25:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_26:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 64, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 64, implicit $exec, implicit $mode ; GFX908-NEXT: {{ $}} ; GFX908-NEXT: bb.1: ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]] @@ -899,17 +837,17 @@ body: | ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_13]], implicit [[V_CVT_I32_F64_e32_14]] ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]], implicit [[V_CVT_I32_F64_e32_16]], implicit [[V_CVT_I32_F64_e32_17]], implicit [[V_CVT_I32_F64_e32_18]], implicit [[V_CVT_I32_F64_e32_19]] ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_20]], implicit [[V_CVT_I32_F64_e32_21]], implicit [[V_CVT_I32_F64_e32_22]], implicit [[V_CVT_I32_F64_e32_23]], implicit [[V_CVT_I32_F64_e32_24]] - ; GFX908-NEXT: 
S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_27]], implicit [[V_CVT_I32_F64_e32_28]], implicit [[V_CVT_I32_F64_e32_29]] - ; GFX908-NEXT: [[DEF31:%[0-9]+]]:agpr_32 = IMPLICIT_DEF + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode + ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_32]], implicit [[V_CVT_I32_F64_e32_27]], implicit [[V_CVT_I32_F64_e32_28]] ; GFX908-NEXT: [[DEF32:%[0-9]+]]:agpr_32 = IMPLICIT_DEF - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_30]], implicit [[V_CVT_I32_F64_e32_31]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[V_CVT_I32_F64_e32_32]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF30]] + ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_29]], implicit [[V_CVT_I32_F64_e32_30]], implicit [[DEF32]], implicit [[DEF]], implicit [[DEF1]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[DEF30]], implicit [[V_CVT_I32_F64_e32_31]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF31]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: reduce_arch_and_acc_vgrp_spill @@ -1358,8 +1296,7 @@ body: | ; GFX908-NEXT: [[V_CVT_I32_F64_e32_252:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 252, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_253:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 253, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_254:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 254, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_255:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 256, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_255:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 256, implicit $exec, implicit $mode ; GFX908-NEXT: {{ $}} ; GFX908-NEXT: bb.1: ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit 
[[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]], implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]] @@ -1387,7 +1324,8 @@ body: | ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_220]], implicit [[V_CVT_I32_F64_e32_221]], implicit [[V_CVT_I32_F64_e32_222]], implicit [[V_CVT_I32_F64_e32_223]], implicit [[V_CVT_I32_F64_e32_224]], implicit [[V_CVT_I32_F64_e32_225]], implicit [[V_CVT_I32_F64_e32_226]], implicit [[V_CVT_I32_F64_e32_227]], implicit [[V_CVT_I32_F64_e32_228]], implicit [[V_CVT_I32_F64_e32_229]] ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_230]], implicit [[V_CVT_I32_F64_e32_231]], implicit [[V_CVT_I32_F64_e32_232]], implicit [[V_CVT_I32_F64_e32_233]], implicit [[V_CVT_I32_F64_e32_234]], implicit [[V_CVT_I32_F64_e32_235]], implicit [[V_CVT_I32_F64_e32_236]], implicit [[V_CVT_I32_F64_e32_237]], implicit [[V_CVT_I32_F64_e32_238]], implicit [[V_CVT_I32_F64_e32_239]] ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_240]], implicit [[V_CVT_I32_F64_e32_241]], implicit [[V_CVT_I32_F64_e32_242]], implicit [[V_CVT_I32_F64_e32_243]], implicit [[V_CVT_I32_F64_e32_244]], implicit [[V_CVT_I32_F64_e32_245]], implicit [[V_CVT_I32_F64_e32_246]], implicit [[V_CVT_I32_F64_e32_247]], implicit [[V_CVT_I32_F64_e32_248]], implicit [[V_CVT_I32_F64_e32_249]] - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_250]], implicit [[V_CVT_I32_F64_e32_251]], implicit [[V_CVT_I32_F64_e32_252]], implicit [[V_CVT_I32_F64_e32_253]], implicit [[V_CVT_I32_F64_e32_254]], implicit [[V_CVT_I32_F64_e32_255]], implicit [[V_CVT_I32_F64_e32_256]], implicit [[DEF]] + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode + ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_250]], implicit [[V_CVT_I32_F64_e32_251]], implicit [[V_CVT_I32_F64_e32_252]], implicit [[V_CVT_I32_F64_e32_253]], implicit [[V_CVT_I32_F64_e32_254]], implicit [[V_CVT_I32_F64_e32_256]], implicit [[V_CVT_I32_F64_e32_255]], implicit [[DEF]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: reduce_spill_archvgpr_above_addressable_limit @@ -1395,6 +1333,7 @@ body: | ; GFX90A-NEXT: successors: %bb.1(0x80000000) ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 0, implicit $exec, implicit $mode, implicit-def $m0 + ; GFX90A-NEXT: [[DEF:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 1, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_2:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 2, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_3:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 3, implicit $exec, implicit $mode, implicit-def $m0 @@ -1650,8 +1589,6 @@ body: | ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_253:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 253, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_254:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 254, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_255:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 256, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[DEF:%[0-9]+]]:agpr_32 = IMPLICIT_DEF - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = 
nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.1: ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]], implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]] @@ -1679,6 +1616,7 @@ body: | ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_220]], implicit [[V_CVT_I32_F64_e32_221]], implicit [[V_CVT_I32_F64_e32_222]], implicit [[V_CVT_I32_F64_e32_223]], implicit [[V_CVT_I32_F64_e32_224]], implicit [[V_CVT_I32_F64_e32_225]], implicit [[V_CVT_I32_F64_e32_226]], implicit [[V_CVT_I32_F64_e32_227]], implicit [[V_CVT_I32_F64_e32_228]], implicit [[V_CVT_I32_F64_e32_229]] ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_230]], implicit [[V_CVT_I32_F64_e32_231]], implicit [[V_CVT_I32_F64_e32_232]], implicit [[V_CVT_I32_F64_e32_233]], implicit [[V_CVT_I32_F64_e32_234]], implicit [[V_CVT_I32_F64_e32_235]], implicit [[V_CVT_I32_F64_e32_236]], implicit [[V_CVT_I32_F64_e32_237]], implicit [[V_CVT_I32_F64_e32_238]], implicit [[V_CVT_I32_F64_e32_239]] ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_240]], implicit [[V_CVT_I32_F64_e32_241]], implicit [[V_CVT_I32_F64_e32_242]], implicit [[V_CVT_I32_F64_e32_243]], implicit [[V_CVT_I32_F64_e32_244]], implicit [[V_CVT_I32_F64_e32_245]], implicit [[V_CVT_I32_F64_e32_246]], implicit [[V_CVT_I32_F64_e32_247]], implicit [[V_CVT_I32_F64_e32_248]], implicit [[V_CVT_I32_F64_e32_249]] + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_250]], implicit [[V_CVT_I32_F64_e32_251]], implicit [[V_CVT_I32_F64_e32_252]], implicit [[V_CVT_I32_F64_e32_253]], implicit [[V_CVT_I32_F64_e32_254]], implicit [[V_CVT_I32_F64_e32_256]], implicit [[V_CVT_I32_F64_e32_255]], implicit [[DEF]] ; GFX90A-NEXT: S_ENDPGM 0 bb.0: @@ -2246,35 +2184,35 @@ body: | ; GFX908-NEXT: [[DEF253:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: [[DEF254:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: [[DEF255:%[0-9]+]]:agpr_32 = IMPLICIT_DEF - ; GFX908-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: {{ $}} ; GFX908-NEXT: bb.1: - ; GFX908-NEXT: S_NOP 0, implicit [[DEF128]], implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]], implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF138]], implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]], implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF148]], implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]], implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF158]], implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]], implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF168]], implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]], implicit [[DEF175]], implicit 
[[DEF176]], implicit [[DEF177]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF178]], implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]], implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF188]], implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]], implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF198]], implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]], implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF208]], implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]], implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF218]], implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]], implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF228]], implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]], implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF238]], implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit [[DEF242]], implicit [[DEF243]], implicit [[DEF244]], implicit [[DEF245]], implicit [[DEF246]], implicit [[DEF247]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF248]], implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[DEF256]], implicit [[DEF]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[DEF30]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]], implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]], implicit [[DEF39]], implicit [[DEF40]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]], implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]], implicit [[DEF49]], implicit [[DEF50]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]], implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]], implicit [[DEF59]], implicit [[DEF60]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]], implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]], implicit [[DEF69]], implicit [[DEF70]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF71]], implicit [[DEF72]], implicit 
[[DEF73]], implicit [[DEF74]], implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]], implicit [[DEF79]], implicit [[DEF80]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]], implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]], implicit [[DEF89]], implicit [[DEF90]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]], implicit [[DEF95]], implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]], implicit [[DEF99]], implicit [[DEF100]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]], implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]], implicit [[DEF109]], implicit [[DEF110]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]], implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]], implicit [[DEF119]], implicit [[DEF120]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]], implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]] + ; GFX908-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF + ; GFX908-NEXT: S_NOP 0, implicit [[DEF256]], implicit [[DEF]], implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF29]], implicit [[DEF30]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]], implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF39]], implicit [[DEF40]], implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]], implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF49]], implicit [[DEF50]], implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]], implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF59]], implicit [[DEF60]], implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]], implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF69]], implicit [[DEF70]], implicit [[DEF71]], implicit [[DEF72]], implicit [[DEF73]], implicit [[DEF74]], implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF79]], implicit [[DEF80]], implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]], implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF89]], implicit [[DEF90]], implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]], implicit [[DEF95]], 
implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF99]], implicit [[DEF100]], implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]], implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF109]], implicit [[DEF110]], implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]], implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF119]], implicit [[DEF120]], implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]], implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[DEF128]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]], implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]], implicit [[DEF138]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]], implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]], implicit [[DEF148]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]], implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]], implicit [[DEF158]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]], implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]], implicit [[DEF168]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]], implicit [[DEF175]], implicit [[DEF176]], implicit [[DEF177]], implicit [[DEF178]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]], implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]], implicit [[DEF188]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]], implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]], implicit [[DEF198]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]], implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]], implicit [[DEF208]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]], implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]], implicit [[DEF218]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]], implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]], implicit [[DEF228]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]], implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]], implicit [[DEF238]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit [[DEF242]], implicit [[DEF243]], implicit [[DEF244]], implicit [[DEF245]], implicit [[DEF246]], implicit 
[[DEF247]], implicit [[DEF248]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: reduce_spill_agpr_above_addressable_limit @@ -2533,41 +2471,41 @@ body: | ; GFX90A-NEXT: [[DEF249:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[DEF250:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[DEF251:%[0-9]+]]:agpr_32 = IMPLICIT_DEF - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 257, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 258, implicit $exec, implicit $mode ; GFX90A-NEXT: [[DEF252:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[DEF253:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[DEF254:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[DEF255:%[0-9]+]]:agpr_32 = IMPLICIT_DEF - ; GFX90A-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 257, implicit $exec, implicit $mode + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 258, implicit $exec, implicit $mode ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.1: - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[DEF256]], implicit [[DEF]], implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[DEF30]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]], implicit [[DEF39]], implicit [[DEF40]], implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]], implicit [[DEF49]], implicit [[DEF50]], implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]], implicit [[DEF59]], implicit [[DEF60]], implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]], implicit [[DEF69]], implicit [[DEF70]], implicit [[DEF71]], implicit [[DEF72]], implicit [[DEF73]], implicit [[DEF74]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]], implicit [[DEF79]], implicit [[DEF80]], implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]], implicit [[DEF89]], implicit [[DEF90]], 
implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF95]], implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]], implicit [[DEF99]], implicit [[DEF100]], implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]], implicit [[DEF109]], implicit [[DEF110]], implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]], implicit [[DEF119]], implicit [[DEF120]], implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[DEF128]], implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]], implicit [[DEF138]], implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]], implicit [[DEF148]], implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]], implicit [[DEF158]], implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]], implicit [[DEF168]], implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF175]], implicit [[DEF176]], implicit [[DEF177]], implicit [[DEF178]], implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]], implicit [[DEF188]], implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]], implicit [[DEF198]], implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]], implicit [[DEF208]], implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]], implicit [[DEF218]], implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]], implicit [[DEF228]], implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]], implicit [[DEF238]], implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit 
[[DEF242]], implicit [[DEF243]], implicit [[DEF244]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF245]], implicit [[DEF246]], implicit [[DEF247]], implicit [[DEF248]], implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]] + ; GFX90A-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF256]], implicit [[DEF]], implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF29]], implicit [[DEF30]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]], implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF39]], implicit [[DEF40]], implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]], implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF49]], implicit [[DEF50]], implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]], implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF59]], implicit [[DEF60]], implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]], implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF69]], implicit [[DEF70]], implicit [[DEF71]], implicit [[DEF72]], implicit [[DEF73]], implicit [[DEF74]], implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF79]], implicit [[DEF80]], implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]], implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF89]], implicit [[DEF90]], implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]], implicit [[DEF95]], implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF99]], implicit [[DEF100]], implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]], implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF109]], implicit [[DEF110]], implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]], implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF119]], implicit [[DEF120]], implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]], implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[DEF128]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]], implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]], implicit [[DEF138]] + ; GFX90A-NEXT: S_NOP 
0, implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]], implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]], implicit [[DEF148]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]], implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]], implicit [[DEF158]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]], implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]], implicit [[DEF168]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]], implicit [[DEF175]], implicit [[DEF176]], implicit [[DEF177]], implicit [[DEF178]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]], implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]], implicit [[DEF188]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]], implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]], implicit [[DEF198]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]], implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]], implicit [[DEF208]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]], implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]], implicit [[DEF218]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]], implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]], implicit [[DEF228]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]], implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]], implicit [[DEF238]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit [[DEF242]], implicit [[DEF243]], implicit [[DEF244]], implicit [[DEF245]], implicit [[DEF246]], implicit [[DEF247]], implicit [[DEF248]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]] ; GFX90A-NEXT: S_ENDPGM 0 bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir index f69337e..06d8474 100644 --- a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir +++ b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir @@ -2104,13 +2104,9 @@ body: | ; GFX908-NEXT: [[S_MOV_B32_58:%[0-9]+]]:sgpr_32 = S_MOV_B32 69 ; GFX908-NEXT: [[S_MOV_B32_59:%[0-9]+]]:sgpr_32 = S_MOV_B32 70 ; GFX908-NEXT: [[S_MOV_B32_60:%[0-9]+]]:sgpr_32 = S_MOV_B32 71 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: 
[[S_MOV_B32_61:%[0-9]+]]:sgpr_32 = S_MOV_B32 72 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[S_MOV_B32_62:%[0-9]+]]:sgpr_32 = S_MOV_B32 73 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[S_MOV_B32_63:%[0-9]+]]:sgpr_32 = S_MOV_B32 74 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode ; GFX908-NEXT: [[S_MOV_B32_64:%[0-9]+]]:sgpr_32 = S_MOV_B32 75 ; GFX908-NEXT: [[S_MOV_B32_65:%[0-9]+]]:sgpr_32 = S_MOV_B32 76 ; GFX908-NEXT: [[S_MOV_B32_66:%[0-9]+]]:sgpr_32 = S_MOV_B32 77 @@ -2120,7 +2116,11 @@ body: | ; GFX908-NEXT: [[S_MOV_B32_70:%[0-9]+]]:sgpr_32 = S_MOV_B32 81 ; GFX908-NEXT: [[S_MOV_B32_71:%[0-9]+]]:sgpr_32 = S_MOV_B32 82 ; GFX908-NEXT: [[S_MOV_B32_72:%[0-9]+]]:sgpr_32 = S_MOV_B32 83 + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[S_MOV_B32_73:%[0-9]+]]:sgpr_32 = S_MOV_B32 84 + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0 + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0 + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode ; GFX908-NEXT: {{ $}} ; GFX908-NEXT: bb.1: ; GFX908-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000) diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll index 11cda2d..c96ba75 100644 --- a/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll @@ -199,7 +199,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_negabsf32(bfloat %src0, bfloat %src1, ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_f32imm1(bfloat %src0, bfloat %src1) #0 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_f32imm1: ; GFX1250: ; %bb.0: @@ -230,7 +229,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_f32imminv2pi(bfloat %src0, bfloat %src ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi(bfloat %src0, bfloat %src1) #0 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi: ; GFX1250: ; %bb.0: @@ -247,7 +245,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi(bfloat %src0, bfloat ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imm63(bfloat %src0, bfloat %src1) #0 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imm63: ; GFX1250: ; %bb.0: @@ -360,7 +357,6 @@ define float @no_mix_simple_fabs(float %src0, float %src1, float %src2) #0 { ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_bf16lo_f32_denormals(bfloat %src0, bfloat %src1, bfloat %src2) #1 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_bf16lo_f32_denormals: ; GFX1250: ; %bb.0: @@ -469,7 +465,6 @@ define float @v_mad_mix_f32_negprecvtbf16lo_bf16lo_bf16lo(i32 %src0.arg, bfloat ret float %result } - define float @v_mad_mix_f32_precvtnegbf16hi_abs_bf16lo_bf16lo(i32 %src0.arg, bfloat %src1, bfloat %src2) #0 { ; GFX1250-LABEL: v_mad_mix_f32_precvtnegbf16hi_abs_bf16lo_bf16lo: ; GFX1250: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll index 
4393172..03304ae 100644 --- a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll @@ -76,9 +76,6 @@ define bfloat @v_mad_mixlo_bf16_bf16lo_bf16lo_f32_clamp_post_cvt(bfloat %src0, b ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_fma_mixlo_bf16 v0, v0, v1, v2 op_sel_hi:[1,1,0] clamp ; GFX1250-NEXT: s_set_pc_i64 s[30:31] - - - %src0.ext = fpext bfloat %src0 to float %src1.ext = fpext bfloat %src1 to float %result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2) @@ -106,7 +103,6 @@ define bfloat @v_mad_mixlo_bf16_bf16lo_bf16lo_f32_clamp_pre_cvt(bfloat %src0, bf ret bfloat %cvt.result } - define <2 x bfloat> @v_mad_mix_v2f32(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v2f32: ; GFX1250: ; %bb.0: @@ -179,7 +175,6 @@ define <4 x bfloat> @v_mad_mix_v4f32(<4 x bfloat> %src0, <4 x bfloat> %src1, <4 ret <4 x bfloat> %cvt.result } - define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v2f32_clamp_postcvt: ; GFX1250: ; %bb.0: @@ -194,9 +189,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bflo ; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[4:5], v[6:7], v[0:1] ; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp ; GFX1250-NEXT: s_set_pc_i64 s[30:31] - - - %src0.ext = fpext <2 x bfloat> %src0 to <2 x float> %src1.ext = fpext <2 x bfloat> %src1 to <2 x float> %src2.ext = fpext <2 x bfloat> %src2 to <2 x float> @@ -207,7 +199,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bflo ret <2 x bfloat> %clamp } - define <3 x bfloat> @v_mad_mix_v3f32_clamp_postcvt(<3 x bfloat> %src0, <3 x bfloat> %src1, <3 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v3f32_clamp_postcvt: ; GFX1250: ; %bb.0: @@ -252,9 +243,6 @@ define <4 x bfloat> @v_mad_mix_v4f32_clamp_postcvt(<4 x bfloat> %src0, <4 x bflo ; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp ; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v2, v3 clamp ; GFX1250-NEXT: s_set_pc_i64 s[30:31] - - - %src0.ext = fpext <4 x bfloat> %src0 to <4 x float> %src1.ext = fpext <4 x bfloat> %src1 to <4 x float> %src2.ext = fpext <4 x bfloat> %src2 to <4 x float> @@ -325,7 +313,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt_hi(<2 x bfloat> %src0, <2 x b ret <2 x bfloat> %insert } - define <2 x bfloat> @v_mad_mix_v2f32_clamp_precvt(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v2f32_clamp_precvt: ; GFX1250: ; %bb.0: @@ -353,7 +340,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_precvt(<2 x bfloat> %src0, <2 x bfloa ret <2 x bfloat> %cvt.result } - define <3 x bfloat> @v_mad_mix_v3f32_clamp_precvt(<3 x bfloat> %src0, <3 x bfloat> %src1, <3 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v3f32_clamp_precvt: ; GFX1250: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll b/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll index 013b68a..99e5d00 100644 --- a/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll +++ b/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll @@ -1,5 +1,7 @@ -;RUN: llc < %s -mtriple=amdgcn-pal -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK -;RUN: llc < %s -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK +;RUN: llc -global-isel=1 < %s -mtriple=amdgcn-pal -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK +;RUN: llc -global-isel=1 < %s -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 | FileCheck %s 
--check-prefixes=CHECK +;RUN: llc -global-isel=0 < %s -mtriple=amdgcn-pal -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK +;RUN: llc -global-isel=0 < %s -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK ; ;CHECK-LABEL: {{^}}_amdgpu_ps_1_arg: ; ;CHECK: NumVgprs: 4 diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll index 0b43ff2..b35a74e 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll @@ -200,8 +200,199 @@ bb: ret void } -declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #1 -declare noundef i32 @llvm.amdgcn.workitem.id.x() #2 +; The inline asm requires the value be copied to an AGPR class, not +; the AV_* pseudo we usually expect for register allocator live range +; splits. +define amdgpu_kernel void @test_rewrite_mfma_direct_copy_to_agpr_class(ptr addrspace(1) %arg) #0 { +; CHECK-LABEL: test_rewrite_mfma_direct_copy_to_agpr_class: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0 +; CHECK-NEXT: v_mov_b32_e32 v32, 2.0 +; CHECK-NEXT: v_mov_b32_e32 v33, 4.0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[28:31], v0, s[0:1] offset:112 +; CHECK-NEXT: global_load_dwordx4 a[24:27], v0, s[0:1] offset:96 +; CHECK-NEXT: global_load_dwordx4 a[20:23], v0, s[0:1] offset:80 +; CHECK-NEXT: global_load_dwordx4 a[16:19], v0, s[0:1] offset:64 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v0, s[0:1] offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v0, s[0:1] offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v0, s[0:1] offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v0, s[0:1] +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 a[0:31], v32, v33, a[0:31] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:31] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_endpgm +bb: + %id = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id + %in = load <32 x float>, ptr addrspace(1) %gep, align 128 + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 2.0, float 4.0, <32 x float> %in, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<32 x float> %mai) + ret void +} + +; TODO: Handle rewriting this case +define void @test_rewrite_mfma_imm_src2(float %arg0, float %arg1) #0 { +; CHECK-LABEL: test_rewrite_mfma_imm_src2: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v0, v1, 2.0 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v0 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v1 +; CHECK-NEXT: v_accvgpr_write_b32 a2, v2 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v3 +; CHECK-NEXT: v_accvgpr_write_b32 a4, v4 +; CHECK-NEXT: v_accvgpr_write_b32 a5, v5 +; CHECK-NEXT: v_accvgpr_write_b32 a6, v6 +; CHECK-NEXT: v_accvgpr_write_b32 a7, v7 +; CHECK-NEXT: v_accvgpr_write_b32 a8, v8 +; CHECK-NEXT: v_accvgpr_write_b32 a9, v9 +; CHECK-NEXT: v_accvgpr_write_b32 a10, v10 +; CHECK-NEXT: v_accvgpr_write_b32 a11, v11 +; CHECK-NEXT: v_accvgpr_write_b32 a12, v12 +; CHECK-NEXT: v_accvgpr_write_b32 a13, v13 +; CHECK-NEXT: v_accvgpr_write_b32 a14, v14 +; CHECK-NEXT: v_accvgpr_write_b32 a15, v15 +; CHECK-NEXT: v_accvgpr_write_b32 a16, 
v16 +; CHECK-NEXT: v_accvgpr_write_b32 a17, v17 +; CHECK-NEXT: v_accvgpr_write_b32 a18, v18 +; CHECK-NEXT: v_accvgpr_write_b32 a19, v19 +; CHECK-NEXT: v_accvgpr_write_b32 a20, v20 +; CHECK-NEXT: v_accvgpr_write_b32 a21, v21 +; CHECK-NEXT: v_accvgpr_write_b32 a22, v22 +; CHECK-NEXT: v_accvgpr_write_b32 a23, v23 +; CHECK-NEXT: v_accvgpr_write_b32 a24, v24 +; CHECK-NEXT: v_accvgpr_write_b32 a25, v25 +; CHECK-NEXT: v_accvgpr_write_b32 a26, v26 +; CHECK-NEXT: v_accvgpr_write_b32 a27, v27 +; CHECK-NEXT: v_accvgpr_write_b32 a28, v28 +; CHECK-NEXT: v_accvgpr_write_b32 a29, v29 +; CHECK-NEXT: v_accvgpr_write_b32 a30, v30 +; CHECK-NEXT: v_accvgpr_write_b32 a31, v31 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:31] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] +bb: + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> splat (float 2.0), i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<32 x float> %mai) + ret void +} + +; TODO: Handle rewriting this case +define void @test_rewrite_mfma_subreg_extract0(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_subreg_extract0: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33] +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v2 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v3 +; CHECK-NEXT: v_accvgpr_write_b32 a2, v4 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v5 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] +bb: + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4) + ret void +} + +define void @test_rewrite_mfma_subreg_extract1(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_subreg_extract1: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33] +; CHECK-NEXT: s_nop 7 
+; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v6 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v7 +; CHECK-NEXT: v_accvgpr_write_b32 a2, v8 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v9 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] +bb: + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4) + ret void +} + +; odd offset +define void @test_rewrite_mfma_subreg_extract2(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_subreg_extract2: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33] +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v3 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v4 +; CHECK-NEXT: v_accvgpr_write_b32 a2, v5 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v6 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] +bb: + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4> + call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4) + ret void +} + +declare <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half>, <4 x half>, <4 x float>, i32 immarg, i32 immarg, i32 immarg) #2 +declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #2 +declare noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x() #3 attributes #0 = { nounwind "amdgpu-flat-work-group-size"="1,256" "amdgpu-waves-per-eu"="4,4" } attributes #1 = { convergent nocallback nofree nosync nounwind willreturn memory(none) } diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-count-graphics.ll b/llvm/test/CodeGen/AMDGPU/sgpr-count-graphics.ll new file mode 100644 index 0000000..3c7b5bf --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/sgpr-count-graphics.ll @@ -0,0 +1,38 @@ +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck %s --check-prefixes=CHECK,PACKED16 +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti < %s | FileCheck %s --check-prefixes=CHECK,SPLIT16 + +@global = addrspace(1) global i32 poison, align 4 + +; The hardware initializes the registers received as arguments by entry points, +; so they will be counted even if unused. 
+ +; Vectors of i1 are always unpacked + +; CHECK-LABEL: vec_of_i1: +; CHECK: TotalNumSgprs: 8 +define amdgpu_ps void @vec_of_i1(<8 x i1> inreg %v8i1) { + ret void +} + +; Vectors of i8 are always unpacked + +; CHECK-LABEL: vec_of_i8: +; CHECK: TotalNumSgprs: 4 +define amdgpu_ps void @vec_of_i8(<4 x i8> inreg %v4i8) { + ret void +} + +; Vectors of 16-bit types are packed for newer architectures and unpacked for older ones. + +; CHECK-LABEL: vec_of_16_bit_ty: +; PACKED16: TotalNumSgprs: 3 +; SPLIT16: TotalNumSgprs: 6 +define amdgpu_ps void @vec_of_16_bit_ty(<2 x i16> inreg %v2i16, <4 x half> inreg %v4half) { + ret void +} + +; CHECK-LABEL: buffer_fat_ptr: +; CHECK: TotalNumSgprs: 5 +define amdgpu_ps void @buffer_fat_ptr(ptr addrspace(7) inreg %p) { + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll b/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll deleted file mode 100644 index 726e35d..0000000 --- a/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll +++ /dev/null @@ -1,47 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefix=GCN %s - -declare i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1), i32) - - -define amdgpu_kernel void @test_isel_single_lane(ptr addrspace(1) %in, ptr addrspace(1) %out) #0 { -; GCN-LABEL: test_isel_single_lane: -; GCN: ; %bb.0: -; GCN-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b32 s4, s[0:1], 0x58 -; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 -; GCN-NEXT: global_atomic_cond_sub_u32 v1, v0, v1, s[0:1] offset:16 th:TH_ATOMIC_RETURN -; GCN-NEXT: s_wait_loadcnt 0x0 -; GCN-NEXT: v_readfirstlane_b32 s0, v1 -; GCN-NEXT: s_addk_co_i32 s0, 0xf4 -; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GCN-NEXT: s_lshl_b32 s1, s0, 4 -; GCN-NEXT: s_mul_i32 s0, s0, s1 -; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GCN-NEXT: s_lshl_b32 s0, s0, 12 -; GCN-NEXT: s_sub_co_i32 s0, s1, s0 -; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GCN-NEXT: v_mov_b32_e32 v1, s0 -; GCN-NEXT: global_store_b32 v0, v1, s[2:3] -; GCN-NEXT: s_endpgm - %gep0 = getelementptr i32, ptr addrspace(1) %in, i32 22 - %val0 = load i32, ptr addrspace(1) %gep0, align 4 - %gep1 = getelementptr i32, ptr addrspace(1) %in, i32 4 - %val1 = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr addrspace(1) %gep1, i32 %val0) - %res0 = add i32 %val1, 244 - %res1 = shl i32 %res0, 4 - %res2 = mul i32 %res0, %res1 - %res3 = shl i32 %res2, 12 - %res4 = sub i32 %res1, %res3 - store i32 %res4, ptr addrspace(1) %out - ret void -} - - -attributes #0 = { - "amdgpu-flat-work-group-size"="1,1" - "amdgpu-waves-per-eu"="1,1" - "uniform-work-group-size"="true" -} diff --git a/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll b/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll index 76c331c..e2ef60b 100644 --- a/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll +++ b/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll @@ -1,6 +1,9 @@ -; RUN: llc -mtriple=amdgcn--amdpal < %s | FileCheck -check-prefix=GCN -check-prefix=SI -enable-var-scope %s -; RUN: llc -mtriple=amdgcn--amdpal -mcpu=tonga < %s | FileCheck -check-prefix=GCN -check-prefix=VI -enable-var-scope %s -; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -enable-var-scope %s +; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal 
< %s | FileCheck -check-prefix=GCN -check-prefix=SI -enable-var-scope %s +; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=tonga < %s | FileCheck -check-prefix=GCN -check-prefix=VI -enable-var-scope %s +; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -enable-var-scope %s +; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal < %s | FileCheck -check-prefix=GCN -check-prefix=SI -enable-var-scope %s +; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=tonga < %s | FileCheck -check-prefix=GCN -check-prefix=VI -enable-var-scope %s +; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -enable-var-scope %s ; This compute shader has input args that claim that it has 17 sgprs and 5 vgprs ; in wave dispatch. Ensure that the sgpr and vgpr counts in COMPUTE_PGM_RSRC1 @@ -17,7 +20,7 @@ ; GCN-NEXT: .scratch_memory_size: 0 ; SI-NEXT: .sgpr_count: 0x11 ; VI-NEXT: .sgpr_count: 0x60 -; GFX9-NEXT: .sgpr_count: 0x11 +; GFX9-NEXT: .sgpr_count: 0x15 ; SI-NEXT: .vgpr_count: 0x5 ; VI-NEXT: .vgpr_count: 0x5 ; GFX9-NEXT: .vgpr_count: 0x5 diff --git a/llvm/test/CodeGen/BPF/loop-exit-cond.ll b/llvm/test/CodeGen/BPF/loop-exit-cond.ll index 69fe714..fa6a4a0 100644 --- a/llvm/test/CodeGen/BPF/loop-exit-cond.ll +++ b/llvm/test/CodeGen/BPF/loop-exit-cond.ll @@ -35,14 +35,14 @@ define dso_local i32 @test(i32 %len, ptr %data) #0 { ; CHECK-NEXT: br i1 [[OR_COND]], label [[FOR_BODY:%.*]], label [[IF_END:%.*]] ; CHECK: for.body: ; CHECK-NEXT: [[I_05:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 1, [[ENTRY:%.*]] ] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[D]]) #[[ATTR3:[0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[D]]) #[[ATTR3:[0-9]+]] ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[DATA]], align 1, !tbaa [[TBAA3:![0-9]+]] ; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: [[NARROW:%.*]] = select i1 [[TOBOOL_NOT]], i8 48, i8 [[TMP1]] ; CHECK-NEXT: [[CONV2:%.*]] = sext i8 [[NARROW]] to i64 ; CHECK-NEXT: store i64 [[CONV2]], ptr [[D]], align 8, !tbaa [[TBAA6:![0-9]+]] ; CHECK-NEXT: call void @foo(ptr nonnull @.str, i32 [[I_05]], ptr nonnull [[D]]) #[[ATTR3]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[D]]) #[[ATTR3]] +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[D]]) #[[ATTR3]] ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_05]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] @@ -61,7 +61,7 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - call void @llvm.lifetime.start.p0(i64 4, ptr %i) #3 + call void @llvm.lifetime.start.p0(ptr %i) #3 store i32 1, ptr %i, align 4, !tbaa !3 br label %for.cond @@ -73,11 +73,11 @@ for.cond: ; preds = %for.inc, %if.then for.cond.cleanup: ; preds = %for.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %i) #3 + call void @llvm.lifetime.end.p0(ptr %i) #3 br label %for.end for.body: ; preds = %for.cond - call void @llvm.lifetime.start.p0(i64 8, ptr %d) #3 + call void @llvm.lifetime.start.p0(ptr %d) #3 %3 = load ptr, ptr %data.addr, align 8, !tbaa !7 %4 = load i8, ptr %3, align 1, !tbaa !9 %conv = sext i8 %4 to i32 @@ -96,7 +96,7 @@ cond.end: ; preds = %cond.false, %cond.t store i64 %conv2, ptr %d, align 8, !tbaa !10 %5 = load i32, ptr %i, align 4, !tbaa !3 call void @foo(ptr @.str, i32 %5, ptr %d) - 
call void @llvm.lifetime.end.p0(i64 8, ptr %d) #3 + call void @llvm.lifetime.end.p0(ptr %d) #3 br label %for.inc for.inc: ; preds = %cond.end @@ -113,12 +113,12 @@ if.end: ; preds = %for.end, %entry } ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare dso_local void @foo(ptr, i32, ptr) #2 ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" } attributes #1 = { argmemonly nofree nosync nounwind willreturn } diff --git a/llvm/test/CodeGen/BPF/vla.ll b/llvm/test/CodeGen/BPF/vla.ll index 9a22769..708b41e 100644 --- a/llvm/test/CodeGen/BPF/vla.ll +++ b/llvm/test/CodeGen/BPF/vla.ll @@ -33,17 +33,17 @@ define dso_local i32 @test1() { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) ; CHECK-NEXT: store i32 8, ptr [[A]], align 4 ; CHECK-NEXT: [[VLA:%.*]] = alloca i8, i64 68, align 1 ; CHECK-NEXT: call void @foo(ptr [[VLA]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret i32 0 ; entry: %a = alloca i32, align 4 %saved_stack = alloca ptr, align 8 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) store i32 8, ptr %a, align 4 %0 = call ptr @llvm.stacksave() store ptr %0, ptr %saved_stack, align 8 @@ -51,11 +51,11 @@ entry: call void @foo(ptr %vla) %1 = load ptr, ptr %saved_stack, align 8 call void @llvm.stackrestore(ptr %1) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret i32 0 } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare ptr @llvm.stacksave() @@ -63,7 +63,7 @@ declare dso_local void @foo(ptr) declare void @llvm.stackrestore(ptr) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define dso_local i32 @test2(i32 %b) { ; CHECK-LABEL: @test2( @@ -73,7 +73,7 @@ define dso_local i32 @test2(i32 %b) { ; CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8 ; CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 ; CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) ; CHECK-NEXT: store i32 8, ptr [[A]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 8, [[TMP1]] @@ -81,7 +81,7 @@ define dso_local i32 @test2(i32 %b) { ; CHECK-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP2]], align 1 ; CHECK-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 ; CHECK-NEXT: call void @foo(ptr [[VLA]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret i32 0 ; entry: @@ -90,7 +90,7 @@ entry: %saved_stack = alloca ptr, align 8 %__vla_expr0 = alloca i64, align 8 store i32 %b, ptr %b.addr, align 4 - call void 
@llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) store i32 8, ptr %a, align 4 %0 = load i32, ptr %b.addr, align 4 %add = add nsw i32 8, %0 @@ -102,6 +102,6 @@ entry: call void @foo(ptr %vla) %3 = load ptr, ptr %saved_stack, align 8 call void @llvm.stackrestore(ptr %3) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret i32 0 } diff --git a/llvm/test/CodeGen/Generic/half.ll b/llvm/test/CodeGen/Generic/half.ll new file mode 100644 index 0000000..f4ea5b5 --- /dev/null +++ b/llvm/test/CodeGen/Generic/half.ll @@ -0,0 +1,87 @@ +; Simple cross-platform smoke checks for basic f16 operations. +; +; There shouldn't be any architectures that crash when trying to use `half`; +; check that here. Additionally do a small handful of smoke tests that work +; well cross-platform. + +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-apple-darwin | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; FIXME(#94434) unsupported on arm64ec +; RUN: %if aarch64-registered-target %{ ! llc %s -o - -mtriple=arm64ec-pc-windows-msvc -filetype=null %} +; RUN: %if amdgpu-registered-target %{ llc %s -o - -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if arc-registered-target %{ llc %s -o - -mtriple=arc-elf | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=arm-unknown-linux-gnueabi | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=thumbv7em-none-eabi | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if avr-registered-target %{ llc %s -o - -mtriple=avr-none | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if bpf-registered-target %{ llc %s -o - -mtriple=bpfel | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 -mcpu=ck860fv -mattr=+hard-float | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if directx-registered-target %{ llc %s -o - -mtriple=dxil-pc-shadermodel6.3-library | FileCheck %s --check-prefixes=NOCRASH %} +; RUN: %if hexagon-registered-target %{ llc %s -o - -mtriple=hexagon-unknown-linux-musl | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if lanai-registered-target %{ llc %s -o - -mtriple=lanai-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu -mattr=+f | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if m68k-registered-target %{ llc %s -o - -mtriple=m68k-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if mips-registered-target %{ llc %s -o - 
-mtriple=mips64-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips64el-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mipsel-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if msp430-registered-target %{ llc %s -o - -mtriple=msp430-none-elf | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if nvptx-registered-target %{ llc %s -o - -mtriple=nvptx64-nvidia-cuda | FileCheck %s --check-prefixes=NOCRASH %} +; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if spirv-registered-target %{ llc %s -o - -mtriple=spirv-unknown-unknown | FileCheck %s --check-prefixes=NOCRASH %} +; RUN: %if systemz-registered-target %{ llc %s -o - -mtriple=s390x-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if ve-registered-target %{ llc %s -o - -mtriple=ve-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if webassembly-registered-target %{ llc %s -o - -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if xcore-registered-target %{ llc %s -o - -mtriple=xcore-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if xtensa-registered-target %{ llc %s -o - -mtriple=xtensa-none-elf | FileCheck %s --check-prefixes=ALL,CHECK %} + +; Codegen tests don't work the same for graphics targets. Add a dummy directive +; for FileCheck; just make sure we don't crash. +; NOCRASH: {{.*}} + +; All backends need to be able to bitcast without converting to another format, +; so we assert against __extendhfsf2, __truncsfhf2, __gnu_{h2f,f2h}_ieee. This +; doesn't catch issues on platforms with hardware f32<->f16, but those tend to +; work better anyway. +; Regression test for https://github.com/llvm/llvm-project/issues/97981.
+ +define half @from_bits(i16 %bits) nounwind { +; ALL-LABEL: from_bits: +; CHECK-NOT: __extend +; CHECK-NOT: __trunc +; CHECK-NOT: __gnu +; BAD: __extendhfsf2 + %f = bitcast i16 %bits to half + ret half %f +} + +define i16 @to_bits(half %f) nounwind { +; ALL-LABEL: to_bits: +; CHECK-NOT: __extend +; CHECK-NOT: __trunc +; CHECK-NOT: __gnu +; BAD: __truncsfhf2 + %bits = bitcast half %f to i16 + ret i16 %bits +} + +; Some platforms have had problems freezing. Regression test for +; https://github.com/llvm/llvm-project/issues/117337 and similar issues. + +define half @check_freeze(half %f) nounwind { +; ALL-LABEL: check_freeze: + %t0 = freeze half %f + ret half %t0 +} diff --git a/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll b/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll index 278cf01..929db4c 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll +++ b/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll @@ -17,6 +17,8 @@ ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 0 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' @@ -287,6 +289,8 @@ ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 0 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll index 890ea44..f054bea 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll +++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll @@ -16,6 +16,8 @@ ; AFTER-PEI-NEXT: waveLimiter: false ; AFTER-PEI-NEXT: hasSpilledSGPRs: true ; AFTER-PEI-NEXT: hasSpilledVGPRs: false +; AFTER-PEI-NEXT: numWaveDispatchSGPRs: 0 +; AFTER-PEI-NEXT: numWaveDispatchVGPRs: 0 ; AFTER-PEI-NEXT: scratchRSrcReg: '$sgpr68_sgpr69_sgpr70_sgpr71' ; AFTER-PEI-NEXT: frameOffsetReg: '$fp_reg' ; AFTER-PEI-NEXT: stackPtrOffsetReg: '$sgpr32' diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll index f84ef8a..924216e 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll +++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll @@ -17,6 +17,8 @@ ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 0 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll index cc834d0..39f1ddd 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll +++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll @@ -17,6 +17,8 @@ ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: 
numWaveDispatchSGPRs: 0 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir index 06c580e..0cb9bc0 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir +++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir @@ -17,6 +17,8 @@ # FULL-NEXT: waveLimiter: true # FULL-NEXT: hasSpilledSGPRs: false # FULL-NEXT: hasSpilledVGPRs: false +# FULL-NEXT: numWaveDispatchSGPRs: 0 +# FULL-NEXT: numWaveDispatchVGPRs: 0 # FULL-NEXT: scratchRSrcReg: '$sgpr8_sgpr9_sgpr10_sgpr11' # FULL-NEXT: frameOffsetReg: '$sgpr12' # FULL-NEXT: stackPtrOffsetReg: '$sgpr13' @@ -127,6 +129,8 @@ body: | # FULL-NEXT: waveLimiter: false # FULL-NEXT: hasSpilledSGPRs: false # FULL-NEXT: hasSpilledVGPRs: false +# FULL-NEXT: numWaveDispatchSGPRs: 0 +# FULL-NEXT: numWaveDispatchVGPRs: 0 # FULL-NEXT: scratchRSrcReg: '$private_rsrc_reg' # FULL-NEXT: frameOffsetReg: '$fp_reg' # FULL-NEXT: stackPtrOffsetReg: '$sp_reg' @@ -206,6 +210,8 @@ body: | # FULL-NEXT: waveLimiter: false # FULL-NEXT: hasSpilledSGPRs: false # FULL-NEXT: hasSpilledVGPRs: false +# FULL-NEXT: numWaveDispatchSGPRs: 0 +# FULL-NEXT: numWaveDispatchVGPRs: 0 # FULL-NEXT: scratchRSrcReg: '$private_rsrc_reg' # FULL-NEXT: frameOffsetReg: '$fp_reg' # FULL-NEXT: stackPtrOffsetReg: '$sp_reg' @@ -286,6 +292,8 @@ body: | # FULL-NEXT: waveLimiter: false # FULL-NEXT: hasSpilledSGPRs: false # FULL-NEXT: hasSpilledVGPRs: false +# FULL-NEXT: numWaveDispatchSGPRs: 0 +# FULL-NEXT: numWaveDispatchVGPRs: 0 # FULL-NEXT: scratchRSrcReg: '$private_rsrc_reg' # FULL-NEXT: frameOffsetReg: '$fp_reg' # FULL-NEXT: stackPtrOffsetReg: '$sp_reg' diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll index 4271546..ab4383b 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll +++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll @@ -20,6 +20,8 @@ ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 0 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' @@ -80,6 +82,8 @@ define amdgpu_kernel void @kernel(i32 %arg0, i64 %arg1, <16 x i32> %arg2) { ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 3 +; CHECK-NEXT: numWaveDispatchVGPRs: 1 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' @@ -144,6 +148,8 @@ define amdgpu_ps void @gds_size_shader(i32 %arg0, i32 inreg %arg1) #5 { ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 16 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3' ; CHECK-NEXT: frameOffsetReg: '$sgpr33' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' @@ -200,6 +206,8 @@ define void @function() { ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 16 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; 
CHECK-NEXT: scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3' ; CHECK-NEXT: frameOffsetReg: '$sgpr33' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' diff --git a/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll b/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll index 4265553..9c564ff 100644 --- a/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll +++ b/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll @@ -44,8 +44,8 @@ declare void @bar(ptr) define void @foo() { %p = alloca i32 - call void @llvm.lifetime.start(i64 4, ptr %p) + call void @llvm.lifetime.start(ptr %p) call void @bar(ptr %p) - call void @llvm.lifetime.end(i64 4, ptr %p) + call void @llvm.lifetime.end(ptr %p) ret void } diff --git a/llvm/test/CodeGen/NVPTX/variadics-lowering.ll b/llvm/test/CodeGen/NVPTX/variadics-lowering.ll index 5502980..1d69f8d 100644 --- a/llvm/test/CodeGen/NVPTX/variadics-lowering.ll +++ b/llvm/test/CodeGen/NVPTX/variadics-lowering.ll @@ -119,7 +119,7 @@ define dso_local i32 @foo() { ; CHECK-NEXT: [[CONV:%.*]] = sext i8 1 to i32 ; CHECK-NEXT: [[CONV1:%.*]] = sext i16 1 to i32 ; CHECK-NEXT: [[CONV2:%.*]] = fpext float 1.000000e+00 to double -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; CHECK-NEXT: store i32 [[CONV]], ptr [[TMP0]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 @@ -133,7 +133,7 @@ define dso_local i32 @foo() { ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 6 ; CHECK-NEXT: store double 1.000000e+00, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics1(i32 noundef 1, ptr [[VARARG_BUFFER]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: ret i32 [[CALL]] ; entry: @@ -208,7 +208,7 @@ define dso_local i32 @bar() { ; CHECK-NEXT: [[S1_SROA_2_0_COPYLOAD:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 4), align 4 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1_SROA_3]], ptr align 1 getelementptr inbounds (i8, ptr @__const.bar.s1, i64 5), i64 3, i1 false) ; CHECK-NEXT: [[S1_SROA_31_0_COPYLOAD:%.*]] = load i64, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 8), align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; CHECK-NEXT: store i32 [[S1_SROA_0_0_COPYLOAD]], ptr [[TMP0]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 @@ -216,7 +216,7 @@ define dso_local i32 @bar() { ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 3 ; CHECK-NEXT: store i64 [[S1_SROA_31_0_COPYLOAD]], ptr [[TMP2]], align 8 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics2(i32 noundef 1, ptr [[VARARG_BUFFER]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: ret i32 [[CALL]] ; entry: @@ -274,11 +274,11 @@ define dso_local i32 @baz() { ; CHECK-LABEL: define dso_local i32 @baz() { ; 
CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[BAZ_VARARG:%.*]], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[BAZ_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; CHECK-NEXT: store <4 x i32> splat (i32 1), ptr [[TMP0]], align 16 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics3(i32 noundef 1, ptr [[VARARG_BUFFER]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: ret i32 [[CALL]] ; entry: @@ -333,11 +333,11 @@ define dso_local void @qux() { ; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S2:%.*]], align 8 ; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[QUX_VARARG:%.*]], align 8 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[S]], ptr align 8 @__const.qux.s, i64 16, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[QUX_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; CHECK-NEXT: store i64 1, ptr [[TMP0]], align 8 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics4(ptr noundef byval([[STRUCT_S2]]) align 8 [[S]], ptr [[VARARG_BUFFER]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir index 41e2124..2796cdb 100644 --- a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir +++ b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir @@ -1,6 +1,12 @@ # RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -start-after \ # RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \ # RUN: -o - | FileCheck %s +# RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu -start-after \ +# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \ +# RUN: -o - | FileCheck %s +# RUN: llc -mcpu=pwr10 -mtriple=powerpc64le-unknown-linux-gnu -start-after \ +# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \ +# RUN: -o - | FileCheck %s --- | ; ModuleID = 'a.ll' @@ -30,7 +36,7 @@ ; Function Attrs: nounwind declare void @llvm.stackprotector(ptr, ptr) #1 - attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+vsx,-power9-vector" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = 
{ nounwind } !llvm.ident = !{!0} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-store.ll new file mode 100644 index 0000000..abf2894 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-store.ll @@ -0,0 +1,72 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s + +define void @store_factor2(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg2.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor3(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg3.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor4(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor4: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg4.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor5(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor5: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg5.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor6(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor6: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg6.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor7(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor7: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg7.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor8(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg8e8.v 
v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg8.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll index 7990dfc..4c84304 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll @@ -366,8 +366,8 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_ ; RV64X60-NEXT: # => This Inner Loop Header: Depth=2 ; RV64X60-NEXT: vl2r.v v8, (s2) ; RV64X60-NEXT: vl2r.v v10, (s3) -; RV64X60-NEXT: sub s1, s1, t3 ; RV64X60-NEXT: vaaddu.vv v8, v8, v10 +; RV64X60-NEXT: sub s1, s1, t3 ; RV64X60-NEXT: vs2r.v v8, (s4) ; RV64X60-NEXT: add s4, s4, t3 ; RV64X60-NEXT: add s3, s3, t3 diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll index 483d707..3d93eca 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll @@ -17,11 +17,11 @@ ; CL: OpFunction ; CL: %[[#FooVar:]] = OpVariable ; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] -; CL-NEXT: OpLifetimeStart %[[#Casted1]], 72 +; CL-NEXT: OpLifetimeStart %[[#Casted1]], 16 ; CL-NEXT: OpBitcast ; CL-NEXT: OpInBoundsPtrAccessChain ; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] -; CL-NEXT: OpLifetimeStop %[[#Casted2]], 72 +; CL-NEXT: OpLifetimeStop %[[#Casted2]], 16 ; VK: OpFunction ; VK: %[[#FooVar:]] = OpVariable @@ -29,18 +29,20 @@ ; VK-NEXT: OpReturn define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) { %RoundedRangeKernel = alloca %tprange, align 8 - call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %RoundedRangeKernel) + call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel) %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8 - call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %RoundedRangeKernel) + call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel) ret void } ; CL: OpFunction ; CL: %[[#BarVar:]] = OpVariable -; CL-NEXT: OpLifetimeStart %[[#BarVar]], 0 +; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]] +; CL-NEXT: OpLifetimeStart %[[#Casted1]], 16 ; CL-NEXT: OpBitcast ; CL-NEXT: OpInBoundsPtrAccessChain -; CL-NEXT: OpLifetimeStop %[[#BarVar]], 0 +; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]] +; CL-NEXT: OpLifetimeStop %[[#Casted2]], 16 ; VK: OpFunction ; VK: %[[#BarVar:]] = OpVariable @@ -48,9 +50,9 @@ define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) ; VK-NEXT: OpReturn define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange) { %RoundedRangeKernel = alloca %tprange, align 8 - call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %RoundedRangeKernel) + call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel) %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8 - call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %RoundedRangeKernel) + call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel) ret void } @@ -66,12 +68,12 @@ define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange) ; VK-NEXT: OpReturn define spir_func void @test(ptr noundef align 8 %_arg) { %var = alloca i8, align 8 - call void 
@llvm.lifetime.start.p0(i64 1, ptr nonnull %var) + call void @llvm.lifetime.start.p0(ptr nonnull %var) %KernelFunc = getelementptr inbounds i8, ptr %var, i64 1 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %var) + call void @llvm.lifetime.end.p0(ptr nonnull %var) ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll b/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll index a27650f..7a90d28 100644 --- a/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll +++ b/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll @@ -37,52 +37,52 @@ define hidden void @copy(ptr noundef %va) { ; CHECK-NEXT: %va.addr = alloca ptr, align 4 ; CHECK-NEXT: %cp = alloca ptr, align 4 ; CHECK-NEXT: store ptr %va, ptr %va.addr, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cp) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %cp) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %cp, ptr %va.addr, i32 4, i1 false) ; CHECK-NEXT: %0 = load ptr, ptr %cp, align 4 ; CHECK-NEXT: call void @valist(ptr noundef %0) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cp) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %cp) ; CHECK-NEXT: ret void ; entry: %va.addr = alloca ptr, align 4 %cp = alloca ptr, align 4 store ptr %va, ptr %va.addr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cp) + call void @llvm.lifetime.start.p0(ptr nonnull %cp) call void @llvm.va_copy.p0(ptr nonnull %cp, ptr nonnull %va.addr) %0 = load ptr, ptr %cp, align 4 call void @valist(ptr noundef %0) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cp) + call void @llvm.lifetime.end.p0(ptr nonnull %cp) ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @llvm.va_copy.p0(ptr, ptr) declare void @valist(ptr noundef) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define hidden void @start_once(...) { ; CHECK-LABEL: define {{[^@]+}}@start_once(ptr %varargs) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %s = alloca ptr, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s) ; CHECK-NEXT: store ptr %varargs, ptr %s, align 4 ; CHECK-NEXT: %0 = load ptr, ptr %s, align 4 ; CHECK-NEXT: call void @valist(ptr noundef %0) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s) ; CHECK-NEXT: ret void ; entry: %s = alloca ptr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s) + call void @llvm.lifetime.start.p0(ptr nonnull %s) call void @llvm.va_start.p0(ptr nonnull %s) %0 = load ptr, ptr %s, align 4 call void @valist(ptr noundef %0) call void @llvm.va_end.p0(ptr %s) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s) + call void @llvm.lifetime.end.p0(ptr nonnull %s) ret void } @@ -95,23 +95,23 @@ define hidden void @start_twice(...) 
{ ; CHECK-NEXT: entry: ; CHECK-NEXT: %s0 = alloca ptr, align 4 ; CHECK-NEXT: %s1 = alloca ptr, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s1) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s0) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s1) ; CHECK-NEXT: store ptr %varargs, ptr %s0, align 4 ; CHECK-NEXT: %0 = load ptr, ptr %s0, align 4 ; CHECK-NEXT: call void @valist(ptr noundef %0) ; CHECK-NEXT: store ptr %varargs, ptr %s1, align 4 ; CHECK-NEXT: %1 = load ptr, ptr %s1, align 4 ; CHECK-NEXT: call void @valist(ptr noundef %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s1) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s0) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s1) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s0) ; CHECK-NEXT: ret void ; entry: %s0 = alloca ptr, align 4 %s1 = alloca ptr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s0) - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s1) + call void @llvm.lifetime.start.p0(ptr nonnull %s0) + call void @llvm.lifetime.start.p0(ptr nonnull %s1) call void @llvm.va_start.p0(ptr nonnull %s0) %0 = load ptr, ptr %s0, align 4 call void @valist(ptr noundef %0) @@ -120,8 +120,8 @@ entry: %1 = load ptr, ptr %s1, align 4 call void @valist(ptr noundef %1) call void @llvm.va_end.p0(ptr %s1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s0) + call void @llvm.lifetime.end.p0(ptr nonnull %s1) + call void @llvm.lifetime.end.p0(ptr nonnull %s0) ret void } @@ -129,11 +129,11 @@ define hidden void @single_i32(i32 noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_i32(i32 noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_i32.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -147,11 +147,11 @@ define hidden void @single_double(double noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_double(double noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_double.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_double.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store double %x, ptr %0, align 8 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -163,11 +163,11 @@ define hidden void @single_v4f32(<4 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v4f32(<4 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v4f32.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer) +; CHECK-NEXT: call void 
@llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <4 x float> %x, ptr %0, align 16 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -179,11 +179,11 @@ define hidden void @single_v8f32(<8 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v8f32(<8 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v8f32.vararg, align 32 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <8 x float> %x, ptr %0, align 32 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -195,11 +195,11 @@ define hidden void @single_v16f32(<16 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v16f32(<16 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v16f32.vararg, align 64 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <16 x float> %x, ptr %0, align 64 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -211,11 +211,11 @@ define hidden void @single_v32f32(<32 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v32f32(<32 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v32f32.vararg, align 128 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <32 x float> %x, ptr %0, align 128 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -227,13 +227,13 @@ define hidden void @i32_double(i32 noundef %x, double noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_double(i32 noundef %x, double noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_double.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_double.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_double.vararg, ptr %vararg_buffer, i32 0, i32 2 ; CHECK-NEXT: store double %y, ptr %1, align 8 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr 
%vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -245,13 +245,13 @@ define hidden void @double_i32(double noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@double_i32(double noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %double_i32.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %double_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store double %x, ptr %0, align 8 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %double_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -265,13 +265,13 @@ define hidden void @i32_libcS(i32 noundef %x, ptr noundef byval(%struct.libcS) a ; CHECK-NEXT: %IndirectAlloca = alloca %struct.libcS, align 8 ; CHECK-NEXT: %vararg_buffer = alloca %i32_libcS.vararg, align 16 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %y, i64 24, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_libcS.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_libcS.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store ptr %IndirectAlloca, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -285,13 +285,13 @@ define hidden void @libcS_i32(ptr noundef byval(%struct.libcS) align 8 %x, i32 n ; CHECK-NEXT: %IndirectAlloca = alloca %struct.libcS, align 8 ; CHECK-NEXT: %vararg_buffer = alloca %libcS_i32.vararg, align 16 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %x, i64 24, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %libcS_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store ptr %IndirectAlloca, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %libcS_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -303,13 +303,13 @@ define hidden void @i32_v4f32(i32 noundef %x, <4 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v4f32(i32 noundef %x, <4 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v4f32.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, 
ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 2 ; CHECK-NEXT: store <4 x float> %y, ptr %1, align 16 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -321,13 +321,13 @@ define hidden void @v4f32_i32(<4 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v4f32_i32(<4 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v4f32_i32.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <4 x float> %x, ptr %0, align 16 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -339,13 +339,13 @@ define hidden void @i32_v8f32(i32 noundef %x, <8 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v8f32(i32 noundef %x, <8 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v8f32.vararg, align 32 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 2 ; CHECK-NEXT: store <8 x float> %y, ptr %1, align 32 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -357,13 +357,13 @@ define hidden void @v8f32_i32(<8 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v8f32_i32(<8 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v8f32_i32.vararg, align 32 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 36, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <8 x float> %x, ptr %0, align 32 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 36, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -375,13 +375,13 @@ define hidden void @i32_v16f32(i32 noundef %x, <16 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v16f32(i32 noundef %x, <16 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v16f32.vararg, align 64 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr %vararg_buffer) +; CHECK-NEXT: call 
void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 2 ; CHECK-NEXT: store <16 x float> %y, ptr %1, align 64 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -393,13 +393,13 @@ define hidden void @v16f32_i32(<16 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v16f32_i32(<16 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v16f32_i32.vararg, align 64 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 68, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <16 x float> %x, ptr %0, align 64 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 68, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -411,13 +411,13 @@ define hidden void @i32_v32f32(i32 noundef %x, <32 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v32f32(i32 noundef %x, <32 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v32f32.vararg, align 128 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 2 ; CHECK-NEXT: store <32 x float> %y, ptr %1, align 128 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -429,13 +429,13 @@ define hidden void @v32f32_i32(<32 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v32f32_i32(<32 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v32f32_i32.vararg, align 128 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 132, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <32 x float> %x, ptr %0, align 128 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 132, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -448,11 +448,11 @@ define hidden void @fptr_single_i32(i32 noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %fptr_single_i32.vararg, 
align 16 ; CHECK-NEXT: %0 = load volatile ptr, ptr @vararg_ptr, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_single_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %1, align 4 ; CHECK-NEXT: call void %0(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -468,11 +468,11 @@ define hidden void @fptr_libcS(ptr noundef byval(%struct.libcS) align 8 %x) { ; CHECK-NEXT: %vararg_buffer = alloca %fptr_libcS.vararg, align 16 ; CHECK-NEXT: %0 = load volatile ptr, ptr @vararg_ptr, align 4 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %x, i64 24, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_libcS.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store ptr %IndirectAlloca, ptr %1, align 4 ; CHECK-NEXT: call void %0(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll index 0f968de..3264fe9 100644 --- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll +++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll @@ -18,7 +18,7 @@ define void @test_static() { ; CHECK-NEXT: i32 1, label %[[ENTRY_SPLIT_SPLIT:.*]] ; CHECK-NEXT: ] ; CHECK: [[ENTRY_SPLIT]]: -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) ; CHECK-NEXT: call void @__wasm_setjmp(ptr @buf, i32 1, ptr [[FUNCTIONINVOCATIONID]]) ; CHECK-NEXT: br label %[[ENTRY_SPLIT_SPLIT]] ; CHECK: [[ENTRY_SPLIT_SPLIT]]: @@ -31,7 +31,7 @@ define void @test_static() { ; CHECK: [[_NOEXC:.*:]] ; CHECK-NEXT: ret void ; CHECK: [[ELSE]]: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) ; CHECK-NEXT: ret void ; CHECK: [[CATCH_DISPATCH_LONGJMP]]: ; CHECK-NEXT: [[TMP0:%.*]] = catchswitch within none [label %catch.longjmp] unwind to caller @@ -53,7 +53,7 @@ define void @test_static() { ; entry: %x = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) %call = call i32 @setjmp(ptr @buf) returns_twice %cmp = icmp eq i32 %call, 0 br i1 %cmp, label %if, label %else @@ -63,7 +63,7 @@ if: ret void else: - call void @llvm.lifetime.end.p0(i64 4, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) ret void } @@ -114,7 +114,7 @@ define void @test_dynamic(i32 %size) { ; entry: %x = alloca i32, i32 %size, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) %call = call i32 @setjmp(ptr @buf) returns_twice %cmp = icmp eq i32 %call, 0 br i1 %cmp, label %if, label %else @@ -124,6 +124,6 @@ if: ret void else: - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) ret void } diff --git a/llvm/test/CodeGen/WebAssembly/returned.ll b/llvm/test/CodeGen/WebAssembly/returned.ll index aef75d8..bad9d60 100644 --- 
a/llvm/test/CodeGen/WebAssembly/returned.ll
+++ b/llvm/test/CodeGen/WebAssembly/returned.ll
@@ -99,8 +99,8 @@ define void @test() {
; CHECK-NEXT: return
entry:
%a = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
%ret = call ptr @returns_arg(ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
diff --git a/llvm/test/CodeGen/X86/GlobalISel/ptrtoaddr.ll b/llvm/test/CodeGen/X86/GlobalISel/ptrtoaddr.ll
new file mode 100644
index 0000000..f65d99d
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/ptrtoaddr.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=CHECK
+
+define i1 @ptrtoaddr_1(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: # kill: def $al killed $al killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i1
+ %ret = xor i1 %trunc, 1
+ ret i1 %ret
+}
+
+define i8 @ptrtoaddr_8(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notb %al
+; CHECK-NEXT: # kill: def $al killed $al killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i8
+ %ret = xor i8 %trunc, -1
+ ret i8 %ret
+}
+
+define i16 @ptrtoaddr_16(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notw %ax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i16
+ %ret = xor i16 %trunc, -1
+ ret i16 %ret
+}
+
+define i32 @ptrtoaddr_32(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notl %eax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i32
+ %ret = xor i32 %trunc, -1
+ ret i32 %ret
+}
+
+define i64 @ptrtoaddr_64(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notq %rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %ret = xor i64 %addr, -1
+ ret i64 %ret
+}
+
+define i128 @ptrtoaddr_128(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_128:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: notq %rax
+; CHECK-NEXT: notq %rdx
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %ext = zext i64 %addr to i128
+ %ret = xor i128 %ext, -1
+ ret i128 %ret
+}
+
+; TODO: Vector version cannot be handled by GlobalISel yet (same error as ptrtoint: https://github.com/llvm/llvm-project/issues/150875).
+; define <2 x i64> @ptrtoaddr_vec(<2 x ptr> %p) { +; entry: +; %addr = ptrtoaddr <2 x ptr> %p to <2 x i64> +; %ret = xor <2 x i64> %addr, <i64 -1, i64 -1> +; ret <2 x i64> %ret +;} + +; UTC_ARGS: --disable + +@foo = global [16 x i8] zeroinitializer +@addr = global i64 ptrtoaddr (ptr @foo to i64) +; CHECK: addr: +; CHECK-NEXT: .quad foo +; CHECK-NEXT: .size addr, 8 +@addr_plus_one = global i64 ptrtoaddr (ptr getelementptr (i8, ptr @foo, i64 1) to i64) +; CHECK: addr_plus_one: +; CHECK-NEXT: .quad foo+1 +; CHECK-NEXT: .size addr_plus_one, 8 +@const_addr = global i64 ptrtoaddr (ptr getelementptr (i8, ptr null, i64 1) to i64) +; CHECK: const_addr: +; CHECK-NEXT: .quad 0+1 +; CHECK-NEXT: .size const_addr, 8 diff --git a/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll b/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll index 2ca99bd..58dfd63 100644 --- a/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll +++ b/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll @@ -51,20 +51,20 @@ entry: %sincos = tail call { float, float } @llvm.sincos.f32(float %in) %sin = extractvalue { float, float } %sincos, 0 %cos = extractvalue { float, float } %sincos, 1 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed) + call void @llvm.lifetime.start.p0(ptr nonnull %computed) store float %cos, ptr %computed, align 4 call void @use_ptr(ptr nonnull %computed) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed) - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed1) + call void @llvm.lifetime.end.p0(ptr nonnull %computed) + call void @llvm.lifetime.start.p0(ptr nonnull %computed1) %fneg_sin = fneg float %sin store float %fneg_sin, ptr %computed1, align 4 call void @use_ptr(ptr nonnull %computed1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed1) - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed3) + call void @llvm.lifetime.end.p0(ptr nonnull %computed1) + call void @llvm.lifetime.start.p0(ptr nonnull %computed3) %fneg_cos = fneg float %cos store float %fneg_cos, ptr %computed3, align 4 call void @use_ptr(ptr nonnull %computed3) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed3) + call void @llvm.lifetime.end.p0(ptr nonnull %computed3) ret i32 0 } diff --git a/llvm/test/CodeGen/X86/ptrtoaddr.ll b/llvm/test/CodeGen/X86/ptrtoaddr.ll new file mode 100644 index 0000000..24bf9db --- /dev/null +++ b/llvm/test/CodeGen/X86/ptrtoaddr.ll @@ -0,0 +1,113 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=CHECK + +define i1 @ptrtoaddr_1(ptr %p) { +; CHECK-LABEL: ptrtoaddr_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: xorb $1, %al +; CHECK-NEXT: # kill: def $al killed $al killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %trunc = trunc i64 %addr to i1 + %ret = xor i1 %trunc, 1 + ret i1 %ret +} + +define i8 @ptrtoaddr_8(ptr %p) { +; CHECK-LABEL: ptrtoaddr_8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notb %al +; CHECK-NEXT: # kill: def $al killed $al killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %trunc = trunc i64 %addr to i8 + %ret = xor i8 %trunc, -1 + ret i8 %ret +} + +define i16 @ptrtoaddr_16(ptr %p) { +; CHECK-LABEL: ptrtoaddr_16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notl %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to 
i64 + %trunc = trunc i64 %addr to i16 + %ret = xor i16 %trunc, -1 + ret i16 %ret +} + +define i32 @ptrtoaddr_32(ptr %p) { +; CHECK-LABEL: ptrtoaddr_32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notl %eax +; CHECK-NEXT: # kill: def $eax killed $eax killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %trunc = trunc i64 %addr to i32 + %ret = xor i32 %trunc, -1 + ret i32 %ret +} + +define i64 @ptrtoaddr_64(ptr %p) { +; CHECK-LABEL: ptrtoaddr_64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notq %rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %ret = xor i64 %addr, -1 + ret i64 %ret +} + +define i128 @ptrtoaddr_128(ptr %p) { +; CHECK-LABEL: ptrtoaddr_128: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notq %rax +; CHECK-NEXT: movq $-1, %rdx +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %ext = zext i64 %addr to i128 + %ret = xor i128 %ext, -1 + ret i128 %ret +} + + +define <2 x i64> @ptrtoaddr_vec(<2 x ptr> %p) { +; CHECK-LABEL: ptrtoaddr_vec: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pcmpeqd %xmm1, %xmm1 +; CHECK-NEXT: pxor %xmm1, %xmm0 +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr <2 x ptr> %p to <2 x i64> + %ret = xor <2 x i64> %addr, <i64 -1, i64 -1> + ret <2 x i64> %ret +} + +; UTC_ARGS: --disable + +@foo = global [16 x i8] zeroinitializer +@addr = global i64 ptrtoaddr (ptr @foo to i64) +; CHECK: addr: +; CHECK-NEXT: .quad foo +; CHECK-NEXT: .size addr, 8 +@addr_plus_one = global i64 ptrtoaddr (ptr getelementptr (i8, ptr @foo, i64 1) to i64) +; CHECK: addr_plus_one: +; CHECK-NEXT: .quad foo+1 +; CHECK-NEXT: .size addr_plus_one, 8 +@const_addr = global i64 ptrtoaddr (ptr getelementptr (i8, ptr null, i64 1) to i64) +; CHECK: const_addr: +; CHECK-NEXT: .quad 0+1 +; CHECK-NEXT: .size const_addr, 8 diff --git a/llvm/test/CodeGen/X86/select-optimize.ll b/llvm/test/CodeGen/X86/select-optimize.ll index c7cf9cb..6cb49f2 100644 --- a/llvm/test/CodeGen/X86/select-optimize.ll +++ b/llvm/test/CodeGen/X86/select-optimize.ll @@ -233,7 +233,7 @@ define i32 @expensive_val_operand5(i32 %b, i32 %y, i1 %cmp) { ; CHECK-LABEL: @expensive_val_operand5( ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[A]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]]) ; CHECK-NEXT: [[CMP_FROZEN:%.*]] = freeze i1 [[CMP:%.*]] ; CHECK-NEXT: br i1 [[CMP_FROZEN]], label [[SELECT_TRUE_SINK:%.*]], label [[SELECT_END:%.*]], !prof [[PROF18]] ; CHECK: select.true.sink: @@ -245,7 +245,7 @@ define i32 @expensive_val_operand5(i32 %b, i32 %y, i1 %cmp) { ; %a = alloca i32 %load = load i32, ptr %a, align 8 - call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %a) + call void @llvm.lifetime.end.p0(ptr nonnull %a) %x = add i32 %load, %b %sel = select i1 %cmp, i32 %x, i32 %y, !prof !17 ret i32 %sel @@ -520,7 +520,7 @@ for.body: ; preds = %for.body.preheader, declare void @llvm.dbg.value(metadata, metadata, metadata) ; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @free(ptr nocapture) diff --git a/llvm/test/DebugInfo/KeyInstructions/debugify.ll b/llvm/test/DebugInfo/KeyInstructions/debugify.ll index 551ae27..d3be513 100644 --- a/llvm/test/DebugInfo/KeyInstructions/debugify.ll +++ 
b/llvm/test/DebugInfo/KeyInstructions/debugify.ll @@ -1,10 +1,7 @@ ; RUN: opt -passes=debugify --debugify-atoms -S -o - < %s \ ; RUN: | FileCheck %s -;; Mirrors llvm/test/DebugInfo/debugify.ll. Split out here because the -;; test is only supported if LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS is enabled -;; (which is a condition for running this test directory). Once the conditional -;; compilation of the feature is removed this can be merged into the original. +;; Mirrors llvm/test/DebugInfo/debugify.ll ; CHECK-LABEL: define void @foo define void @foo() { diff --git a/llvm/test/DebugInfo/KeyInstructions/lit.local.cfg b/llvm/test/DebugInfo/KeyInstructions/lit.local.cfg deleted file mode 100644 index 482bd5c..0000000 --- a/llvm/test/DebugInfo/KeyInstructions/lit.local.cfg +++ /dev/null @@ -1,2 +0,0 @@ -if not config.has_key_instructions: - config.unsupported = True diff --git a/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll b/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll index 3685d8d..aad8940 100644 --- a/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll +++ b/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll @@ -8,9 +8,6 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind - define i32 @basic_test(i64 %i) sanitize_address { ; CHECK-LABEL: define i32 @basic_test( @@ -22,7 +19,7 @@ entry: ; CHECK-UAS: store i64 -868082052615769615, ptr %{{[0-9]+}} ; CHECK-UAS-SS-NOT: store i64 - call void @llvm.lifetime.start.p0(i64 2, ptr %c) + call void @llvm.lifetime.start.p0(ptr %c) ; Memory is unpoisoned at llvm.lifetime.start: 01 ; CHECK-UAS: store i8 2, ptr %{{[0-9]+}} @@ -30,7 +27,7 @@ entry: store volatile i32 0, ptr %retval store volatile i8 0, ptr %ci, align 1 - call void @llvm.lifetime.end.p0(i64 2, ptr %c) + call void @llvm.lifetime.end.p0(ptr %c) ; Memory is poisoned at llvm.lifetime.end: F8 ; CHECK-UAS: store i8 -8, ptr %{{[0-9]+}} ; CHECK-UAS-SS-NOT: store i8 -8, diff --git a/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll b/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll index 9594370..d1e0180 100644 --- a/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll +++ b/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll @@ -9,73 +9,10 @@ target triple = "x86_64-unknown-linux-gnu" declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind -define void @lifetime_no_size(i64 %i) sanitize_address { -; CHECK-LABEL: define void @lifetime_no_size( -; CHECK-SAME: i64 [[I:%.*]]) #[[ATTR1:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32 -; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64 -; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 32 -; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr -; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP0]] to ptr -; CHECK-NEXT: store i64 1102416563, ptr [[TMP3]], align 8 -; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP0]], 8 -; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr -; CHECK-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack to i64), ptr [[TMP5]], align 8 -; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP0]], 16 -; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr -; CHECK-NEXT: store i64 ptrtoint (ptr 
@lifetime_no_size to i64), ptr [[TMP7]], align 8 -; CHECK-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP0]], 3 -; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 2147450880 -; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 0 -; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr -; CHECK-NEXT: store i64 -868083117767659023, ptr [[TMP11]], align 1 -; CHECK-NEXT: [[AI:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP2]], i64 0, i64 [[I]] -; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[AI]] to i64 -; CHECK-NEXT: [[TMP13:%.*]] = lshr i64 [[TMP12]], 3 -; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP13]], 2147450880 -; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr -; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr [[TMP15]], align 1 -; CHECK-NEXT: [[TMP17:%.*]] = icmp ne i8 [[TMP16]], 0 -; CHECK-NEXT: br i1 [[TMP17]], label %[[BB18:.*]], label %[[BB23:.*]], !prof [[PROF1:![0-9]+]] -; CHECK: [[BB18]]: -; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP12]], 7 -; CHECK-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i8 -; CHECK-NEXT: [[TMP21:%.*]] = icmp sge i8 [[TMP20]], [[TMP16]] -; CHECK-NEXT: br i1 [[TMP21]], label %[[BB22:.*]], label %[[BB23]] -; CHECK: [[BB22]]: -; CHECK-NEXT: call void @__asan_report_store1(i64 [[TMP12]]) #[[ATTR4:[0-9]+]] -; CHECK-NEXT: unreachable -; CHECK: [[BB23]]: -; CHECK-NEXT: store volatile i8 0, ptr [[AI]], align 4 -; CHECK-NEXT: store i64 1172321806, ptr [[TMP3]], align 8 -; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP9]], 0 -; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: store i64 0, ptr [[TMP25]], align 1 -; CHECK-NEXT: ret void -; -entry: - %a = alloca [2 x i32], align 4 - - ; Poison memory in prologue: 0xf3f3f300f1f1f1f1 - - call void @llvm.lifetime.start.p0(i64 -1, ptr %a) - ; Check that lifetime with no size are ignored. - - %ai = getelementptr inbounds [2 x i32], ptr %a, i64 0, i64 %i - store volatile i8 0, ptr %ai, align 4 - - call void @llvm.lifetime.end.p0(i64 -1, ptr %a) - ; Check that lifetime with no size are ignored. - - ; Unpoison stack frame on exit. - ret void -} - ; Generic case of lifetime analysis. 
define void @lifetime() sanitize_address { ; CHECK-DEFAULT-LABEL: define void @lifetime( -; CHECK-DEFAULT-SAME: ) #[[ATTR1]] { +; CHECK-DEFAULT-SAME: ) #[[ATTR0:[0-9]+]] { ; CHECK-DEFAULT-NEXT: [[TMP1:%.*]] = alloca i64, align 32 ; CHECK-DEFAULT-NEXT: store i64 0, ptr [[TMP1]], align 8 ; CHECK-DEFAULT-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32 @@ -86,7 +23,7 @@ define void @lifetime() sanitize_address { ; CHECK-DEFAULT-NEXT: store i64 1102416563, ptr [[TMP5]], align 8 ; CHECK-DEFAULT-NEXT: [[TMP6:%.*]] = add i64 [[TMP2]], 8 ; CHECK-DEFAULT-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr -; CHECK-DEFAULT-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack.1 to i64), ptr [[TMP7]], align 8 +; CHECK-DEFAULT-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack to i64), ptr [[TMP7]], align 8 ; CHECK-DEFAULT-NEXT: [[TMP8:%.*]] = add i64 [[TMP2]], 16 ; CHECK-DEFAULT-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-DEFAULT-NEXT: store i64 ptrtoint (ptr @lifetime to i64), ptr [[TMP9]], align 8 @@ -104,14 +41,14 @@ define void @lifetime() sanitize_address { ; CHECK-DEFAULT-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr ; CHECK-DEFAULT-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1 ; CHECK-DEFAULT-NEXT: [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0 -; CHECK-DEFAULT-NEXT: br i1 [[TMP21]], label %[[BB22:.*]], label %[[BB27:.*]], !prof [[PROF1]] +; CHECK-DEFAULT-NEXT: br i1 [[TMP21]], label %[[BB22:.*]], label %[[BB27:.*]], !prof [[PROF1:![0-9]+]] ; CHECK-DEFAULT: [[BB22]]: ; CHECK-DEFAULT-NEXT: [[TMP23:%.*]] = and i64 [[TMP16]], 7 ; CHECK-DEFAULT-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8 ; CHECK-DEFAULT-NEXT: [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]] ; CHECK-DEFAULT-NEXT: br i1 [[TMP25]], label %[[BB26:.*]], label %[[BB27]] ; CHECK-DEFAULT: [[BB26]]: -; CHECK-DEFAULT-NEXT: call void @__asan_report_store1(i64 [[TMP16]]) #[[ATTR4]] +; CHECK-DEFAULT-NEXT: call void @__asan_report_store1(i64 [[TMP16]]) #[[ATTR4:[0-9]+]] ; CHECK-DEFAULT-NEXT: unreachable ; CHECK-DEFAULT: [[BB27]]: ; CHECK-DEFAULT-NEXT: store volatile i8 0, ptr [[TMP4]], align 1 @@ -182,7 +119,7 @@ define void @lifetime() sanitize_address { ; CHECK-DEFAULT-NEXT: ret void ; ; CHECK-NO-DYNAMIC-LABEL: define void @lifetime( -; CHECK-NO-DYNAMIC-SAME: ) #[[ATTR1]] { +; CHECK-NO-DYNAMIC-SAME: ) #[[ATTR0:[0-9]+]] { ; CHECK-NO-DYNAMIC-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32 ; CHECK-NO-DYNAMIC-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64 ; CHECK-NO-DYNAMIC-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 32 @@ -191,7 +128,7 @@ define void @lifetime() sanitize_address { ; CHECK-NO-DYNAMIC-NEXT: store i64 1102416563, ptr [[TMP4]], align 8 ; CHECK-NO-DYNAMIC-NEXT: [[TMP5:%.*]] = add i64 [[TMP1]], 8 ; CHECK-NO-DYNAMIC-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr -; CHECK-NO-DYNAMIC-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack.1 to i64), ptr [[TMP6]], align 8 +; CHECK-NO-DYNAMIC-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack to i64), ptr [[TMP6]], align 8 ; CHECK-NO-DYNAMIC-NEXT: [[TMP7:%.*]] = add i64 [[TMP1]], 16 ; CHECK-NO-DYNAMIC-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NO-DYNAMIC-NEXT: store i64 ptrtoint (ptr @lifetime to i64), ptr [[TMP8]], align 8 @@ -209,14 +146,14 @@ define void @lifetime() sanitize_address { ; CHECK-NO-DYNAMIC-NEXT: [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr ; CHECK-NO-DYNAMIC-NEXT: [[TMP19:%.*]] = load i8, ptr [[TMP18]], align 1 ; CHECK-NO-DYNAMIC-NEXT: [[TMP20:%.*]] = icmp ne i8 [[TMP19]], 0 -; CHECK-NO-DYNAMIC-NEXT: br i1 [[TMP20]], label %[[BB21:.*]], 
label %[[BB26:.*]], !prof [[PROF1]] +; CHECK-NO-DYNAMIC-NEXT: br i1 [[TMP20]], label %[[BB21:.*]], label %[[BB26:.*]], !prof [[PROF1:![0-9]+]] ; CHECK-NO-DYNAMIC: [[BB21]]: ; CHECK-NO-DYNAMIC-NEXT: [[TMP22:%.*]] = and i64 [[TMP15]], 7 ; CHECK-NO-DYNAMIC-NEXT: [[TMP23:%.*]] = trunc i64 [[TMP22]] to i8 ; CHECK-NO-DYNAMIC-NEXT: [[TMP24:%.*]] = icmp sge i8 [[TMP23]], [[TMP19]] ; CHECK-NO-DYNAMIC-NEXT: br i1 [[TMP24]], label %[[BB25:.*]], label %[[BB26]] ; CHECK-NO-DYNAMIC: [[BB25]]: -; CHECK-NO-DYNAMIC-NEXT: call void @__asan_report_store1(i64 [[TMP15]]) #[[ATTR4]] +; CHECK-NO-DYNAMIC-NEXT: call void @__asan_report_store1(i64 [[TMP15]]) #[[ATTR4:[0-9]+]] ; CHECK-NO-DYNAMIC-NEXT: unreachable ; CHECK-NO-DYNAMIC: [[BB26]]: ; CHECK-NO-DYNAMIC-NEXT: store volatile i8 0, ptr [[TMP3]], align 1 @@ -227,7 +164,7 @@ define void @lifetime() sanitize_address { ; CHECK-NO-DYNAMIC-NEXT: [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr ; CHECK-NO-DYNAMIC-NEXT: store i8 -8, ptr [[TMP30]], align 1 ; CHECK-NO-DYNAMIC-NEXT: [[ARR:%.*]] = alloca [10 x i32], align 16 -; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[ARR]]) +; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]]) ; CHECK-NO-DYNAMIC-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[ARR]] to i64 ; CHECK-NO-DYNAMIC-NEXT: [[TMP32:%.*]] = lshr i64 [[TMP31]], 3 ; CHECK-NO-DYNAMIC-NEXT: [[TMP33:%.*]] = add i64 [[TMP32]], 2147450880 @@ -245,7 +182,7 @@ define void @lifetime() sanitize_address { ; CHECK-NO-DYNAMIC-NEXT: unreachable ; CHECK-NO-DYNAMIC: [[BB42]]: ; CHECK-NO-DYNAMIC-NEXT: store volatile i8 0, ptr [[ARR]], align 1 -; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[ARR]]) +; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]]) ; CHECK-NO-DYNAMIC-NEXT: [[TMP43:%.*]] = add i64 [[TMP10]], 4 ; CHECK-NO-DYNAMIC-NEXT: [[TMP44:%.*]] = inttoptr i64 [[TMP43]] to ptr ; CHECK-NO-DYNAMIC-NEXT: store i8 4, ptr [[TMP44]], align 1 @@ -318,8 +255,8 @@ define void @zero_sized(i64 %a) #0 { ; CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 ; CHECK-NEXT: [[B:%.*]] = alloca [0 x i8], align 1 ; CHECK-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 0, ptr [[B]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 0, ptr [[B]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]]) ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll b/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll index 481e780..07b28f4 100644 --- a/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll +++ b/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll @@ -10,9 +10,9 @@ declare void @foo(ptr writeonly) memory(argmem: write) define void @bar() sanitize_address { entry: %x = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) call void @foo(ptr %x) - call void @llvm.lifetime.end.p0(i64 4, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) ret void } diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll index ac5d8b8..37b280c 100644 --- a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll +++ b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll @@ -10,8 +10,8 @@ target triple = "riscv64-unknown-linux" declare void 
@mayFail(ptr %x) sanitize_hwaddress declare void @onExcept(ptr %x) sanitize_hwaddress -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind declare i32 @__gxx_personality_v0(...) define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 { @@ -46,7 +46,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 { ; CHECK-NEXT: [[X_HWASAN:%.*]] = inttoptr i64 [[TMP19]] to ptr ; CHECK-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 ; CHECK-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) ; CHECK-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP15]] to i8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[X]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = and i64 [[TMP21]], 72057594037927935 @@ -65,7 +65,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 { ; CHECK-NEXT: [[TMP30:%.*]] = lshr i64 [[TMP29]], 4 ; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP30]] ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP31]], i8 [[TMP27]], i64 1, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) ; CHECK-NEXT: ret void ; CHECK: lpad: ; CHECK-NEXT: [[TMP32:%.*]] = landingpad { ptr, i32 } @@ -81,7 +81,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 { ; CHECK-NEXT: [[TMP38:%.*]] = lshr i64 [[TMP37]], 4 ; CHECK-NEXT: [[TMP39:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP38]] ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP39]], i8 [[TMP35]], i64 1, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) ; CHECK-NEXT: br label [[EH_RESUME:%.*]] ; CHECK: eh.resume: ; CHECK-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 @@ -94,12 +94,12 @@ entry: %x = alloca i32, align 8 %exn.slot = alloca ptr, align 8 %ehselector.slot = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) invoke void @mayFail(ptr %x) to label %invoke.cont unwind label %lpad invoke.cont: ; preds = %entry - call void @llvm.lifetime.end.p0(i64 4, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) ret void lpad: ; preds = %entry @@ -111,7 +111,7 @@ lpad: ; preds = %entry %2 = extractvalue { ptr, i32 } %0, 1 store i32 %2, ptr %ehselector.slot, align 4 call void @onExcept(ptr %x) #18 - call void @llvm.lifetime.end.p0(i64 4, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) br label %eh.resume eh.resume: ; preds = %lpad diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll index db78c1f..d2949bf 100644 --- a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll +++ b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll @@ -78,13 +78,13 @@ sw.bb1: ; preds = %entry br label %return while.body: ; preds = %entry - call void @llvm.lifetime.start.p0(i64 4096, ptr nonnull %buf) #10 + call void @llvm.lifetime.start.p0(ptr nonnull %buf) #10 store ptr %buf, ptr @stackbuf, align 8 ; may_jump may call longjmp, going back to the 
switch (and then the return), ; bypassing the lifetime.end. This is why we need to untag on the return, ; rather than the lifetime.end. call void @may_jump() - call void @llvm.lifetime.end.p0(i64 4096, ptr nonnull %buf) #10 + call void @llvm.lifetime.end.p0(ptr nonnull %buf) #10 br label %return return: ; preds = %entry, %while.body, %sw.bb1 @@ -94,5 +94,5 @@ return: ; preds = %entry, %while.body, declare i32 @setjmp(ptr noundef) returns_twice -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll b/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll index 292a565..ef86e63 100644 --- a/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll +++ b/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll @@ -109,7 +109,7 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress { ; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP5]], 57 ; CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP7]], [[TMP8]] ; CHECK-NEXT: [[BUF_SROA_0_HWASAN:%.*]] = inttoptr i64 [[TMP9]] to ptr -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[BUF_SROA_0]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUF_SROA_0]]) ; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i8 ; CHECK-NEXT: call void @__hwasan_tag_memory(ptr [[BUF_SROA_0]], i8 [[TMP10]], i64 16) ; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[BUF_SROA_0_HWASAN]] to i64 @@ -117,7 +117,7 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress { ; CHECK-NEXT: store volatile i8 0, ptr [[BUF_SROA_0_HWASAN]], align 4 ; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8 ; CHECK-NEXT: call void @__hwasan_tag_memory(ptr [[BUF_SROA_0]], i8 [[TMP12]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[BUF_SROA_0]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUF_SROA_0]]) ; CHECK-NEXT: ret i32 0 ; ; INLINE-LABEL: define i32 @test_simple @@ -150,7 +150,7 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress { ; INLINE-NEXT: [[TMP19:%.*]] = shl i64 [[TMP16]], 57 ; INLINE-NEXT: [[TMP20:%.*]] = or i64 [[TMP18]], [[TMP19]] ; INLINE-NEXT: [[BUF_SROA_0_HWASAN:%.*]] = inttoptr i64 [[TMP20]] to ptr -; INLINE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[BUF_SROA_0]]) +; INLINE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUF_SROA_0]]) ; INLINE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP16]] to i8 ; INLINE-NEXT: [[TMP22:%.*]] = ptrtoint ptr [[BUF_SROA_0]] to i64 ; INLINE-NEXT: [[TMP23:%.*]] = and i64 [[TMP22]], -9079256848778919937 @@ -197,19 +197,19 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress { ; INLINE-NEXT: [[TMP54:%.*]] = lshr i64 [[TMP53]], 4 ; INLINE-NEXT: [[TMP55:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP54]] ; INLINE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP55]], i8 [[TMP51]], i64 1, i1 false) -; INLINE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[BUF_SROA_0]]) +; INLINE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUF_SROA_0]]) ; INLINE-NEXT: ret i32 0 ; entry: %buf.sroa.0 = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) store volatile i8 0, ptr %buf.sroa.0, align 4 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull 
%buf.sroa.0) ret i32 0 } ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll b/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll index 3e13eb4..f2ba94c 100644 --- a/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll +++ b/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll @@ -10,8 +10,8 @@ target triple = "aarch64--linux-android" declare void @mayFail(ptr %x) sanitize_hwaddress declare void @onExcept(ptr %x) sanitize_hwaddress -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind declare i32 @__gxx_personality_v0(...) define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 { @@ -48,7 +48,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 { ; CHECK-NEXT: [[X_HWASAN:%.*]] = inttoptr i64 [[TMP21]] to ptr ; CHECK-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 ; CHECK-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) ; CHECK-NEXT: [[TMP22:%.*]] = trunc i64 [[TMP17]] to i8 ; CHECK-NEXT: [[TMP23:%.*]] = ptrtoint ptr [[X]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = and i64 [[TMP23]], 72057594037927935 @@ -64,7 +64,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 { ; CHECK-NEXT: [[TMP30:%.*]] = lshr i64 [[TMP29]], 4 ; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[TMP16]], i64 [[TMP30]] ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP31]], i8 [[TMP27]], i64 1, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) ; CHECK-NEXT: ret void ; CHECK: lpad: ; CHECK-NEXT: [[TMP32:%.*]] = landingpad { ptr, i32 } @@ -82,7 +82,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 { ; CHECK-NEXT: [[TMP39:%.*]] = lshr i64 [[TMP38]], 4 ; CHECK-NEXT: [[TMP40:%.*]] = getelementptr i8, ptr [[TMP16]], i64 [[TMP39]] ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP40]], i8 [[TMP36]], i64 1, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) ; CHECK-NEXT: br label [[EH_RESUME:%.*]] ; CHECK: eh.resume: ; CHECK-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 @@ -95,12 +95,12 @@ entry: %x = alloca i32, align 8 %exn.slot = alloca ptr, align 8 %ehselector.slot = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) invoke void @mayFail(ptr %x) to label %invoke.cont unwind label %lpad invoke.cont: ; preds = %entry - call void @llvm.lifetime.end.p0(i64 4, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) ret void lpad: ; preds = %entry @@ -112,7 +112,7 @@ lpad: ; preds = %entry %2 = extractvalue { ptr, i32 } %0, 1 store i32 %2, ptr %ehselector.slot, align 4 call void @onExcept(ptr %x) #18 
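For context, every hunk in this patch applies the same mechanical rewrite: the i64 size operand of the lifetime intrinsics is dropped, leaving the pointer-only form, with the object's extent implied by the alloca being marked. A minimal before/after sketch (the @sketch function is illustrative only, not taken from any test in this patch):

; Old form, removed by this patch:
;   declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
;   call void @llvm.lifetime.start.p0(i64 4, ptr %p)
; New form; the size is implied by the alloca:
declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)

define void @sketch() {
  %p = alloca i32, align 4
  call void @llvm.lifetime.start.p0(ptr %p)   ; marks the whole 4-byte alloca live
  store i32 0, ptr %p
  call void @llvm.lifetime.end.p0(ptr %p)     ; marks it dead again
  ret void
}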
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) br label %eh.resume eh.resume: ; preds = %lpad diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll b/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll index ae6fe57..a40d964 100644 --- a/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll +++ b/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll @@ -16,22 +16,22 @@ define i32 @myCall_w2(i32 %in) sanitize_hwaddress { entry: %a = alloca [17 x ptr], align 8 %a2 = alloca [16 x ptr], align 8 - call void @llvm.lifetime.start.p0(i64 136, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) %t1 = call i32 @foo(i32 %in, ptr %a) %t2 = call i32 @foo(i32 %in, ptr %a) - call void @llvm.lifetime.end.p0(i64 136, ptr %a) - call void @llvm.lifetime.start.p0(i64 128, ptr %a2) + call void @llvm.lifetime.end.p0(ptr %a) + call void @llvm.lifetime.start.p0(ptr %a2) %t3 = call i32 @foo(i32 %in, ptr %a2) %t4 = call i32 @foo(i32 %in, ptr %a2) - call void @llvm.lifetime.end.p0(i64 128, ptr %a2) + call void @llvm.lifetime.end.p0(ptr %a2) %t5 = add i32 %t1, %t2 %t6 = add i32 %t3, %t4 %t7 = add i32 %t5, %t6 ret i32 %t7 } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind declare i32 @foo(i32, ptr) diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll b/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll index 60af551..a76566b 100644 --- a/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll +++ b/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll @@ -24,9 +24,9 @@ entry: ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_simple ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_simple %buf.sroa.0 = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) ret i32 0 } @@ -43,9 +43,9 @@ entry: ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_cmpxchg ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_cmpxchg %buf.sroa.0 = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) %0 = cmpxchg ptr %buf.sroa.0, i8 1, i8 2 monotonic monotonic, align 4 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) ret i32 0 } @@ -62,9 +62,9 @@ entry: ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_atomicrwm ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_atomicrwm %buf.sroa.0 = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) %0 = 
atomicrmw add ptr %buf.sroa.0, i8 1 monotonic, align 4 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) ret i32 0 } @@ -82,9 +82,9 @@ entry: ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_use %buf.sroa.0 = alloca i8, align 4 call void @use(ptr nonnull %buf.sroa.0) - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) ret i32 0 } @@ -101,9 +101,9 @@ entry: ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_in_range ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_in_range %buf.sroa.0 = alloca [10 x i8], align 4 - call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8 - call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) ret i32 0 } @@ -121,9 +121,9 @@ entry: ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_in_range2 %buf.sroa.0 = alloca [10 x i8], align 4 %ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 9 - call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) store volatile i8 0, ptr %ptr, align 4, !tbaa !8 - call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) ret i32 0 } @@ -194,9 +194,9 @@ entry: ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_out_of_range %buf.sroa.0 = alloca [10 x i8], align 4 %ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 10 - call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) store volatile i8 0, ptr %ptr, align 4, !tbaa !8 - call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) ret i32 0 } @@ -213,9 +213,9 @@ entry: ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_out_of_range2 %buf.sroa.0 = alloca [10 x i8], align 4 %ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 10 - call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) %0 = cmpxchg ptr %ptr, i8 1, i8 2 monotonic monotonic, align 4 - call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) ret i32 0 } @@ -268,11 +268,11 @@ entry: %ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 9 %buf.sroa.1 = alloca [10 x i8], align 4 %ptr1 = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 9 - call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0) - call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0) - call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.1) + call void 
@llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.1) call void @llvm.memmove.p0.p0.i32(ptr %ptr, ptr %ptr1, i32 1, i1 true) - call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.1) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.1) ret i32 0 } @@ -289,31 +289,9 @@ entry: ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_out_of_range6 %buf.sroa.0 = alloca [10 x i8], align 4 %ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 10 - call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) %0 = atomicrmw add ptr %ptr, i32 1 monotonic, align 4 - call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0) - ret i32 0 -} - -; Check an alloca with potentially out of range GEP to ensure it gets a tag and -; check. -define i32 @test_potentially_out_of_range(ptr %a) sanitize_hwaddress { -entry: - ; CHECK-LABEL: @test_potentially_out_of_range - ; NOSAFETY: call {{.*}}__hwasan_generate_tag - ; NOSAFETY: call {{.*}}__hwasan_store - ; SAFETY: call {{.*}}__hwasan_generate_tag - ; SAFETY: call {{.*}}__hwasan_store - ; NOSTACK-NOT: call {{.*}}__hwasan_generate_tag - ; NOSTACK-NOT: call {{.*}}__hwasan_store - ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_potentially_out_of_range - ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_potentially_out_of_range - %buf.sroa.0 = alloca [10 x i8], align 4 - %off = call i32 @getoffset() - %ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 %off - call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %ptr) - store volatile i8 0, ptr %ptr, align 4, !tbaa !8 - call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %ptr) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) ret i32 0 } @@ -333,26 +311,6 @@ entry: call void @llvm.memmove.p0.p0.i32(ptr %ptr, ptr %a, i32 1, i1 true) ret i32 0 } -; Check an alloca with potentially out of range GEP to ensure it gets a tag and -; check. 
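The tests deleted in this file (test_potentially_out_of_range and test_unclear) passed a GEP result or a pointer returned from a call as the marker operand. With the size operand gone, a lifetime marker describes the whole alloca, so the operand is now expected to be the alloca itself (or poison, which test_lifetime_poison further down still exercises); markers on derived pointers are no longer expressible, hence these tests are removed rather than rewritten. A minimal sketch of the form that remains valid (names are illustrative, not from this patch):

declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)

define void @marker_on_alloca() {
  %buf = alloca [10 x i8], align 4
  %p = getelementptr [10 x i8], ptr %buf, i32 0, i32 5
  call void @llvm.lifetime.start.p0(ptr %buf) ; operand is the alloca, not %p
  store volatile i8 0, ptr %p                 ; accesses may still use derived pointers
  call void @llvm.lifetime.end.p0(ptr %buf)
  ret void
}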
-define i32 @test_unclear(ptr %a) sanitize_hwaddress { -entry: - ; CHECK-LABEL: @test_unclear - ; NOSAFETY: call {{.*}}__hwasan_generate_tag - ; NOSAFETY: call {{.*}}__hwasan_store - ; SAFETY: call {{.*}}__hwasan_generate_tag - ; SAFETY: call {{.*}}__hwasan_store - ; NOSTACK-NOT: call {{.*}}__hwasan_generate_tag - ; NOSTACK: call {{.*}}__hwasan_store - ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_unclear - ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_unclear - %buf.sroa.0 = alloca i8, align 4 - %ptr = call ptr @getptr(ptr %buf.sroa.0) - call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %ptr) - store volatile i8 0, ptr %ptr, align 4, !tbaa !8 - call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %ptr) - ret i32 0 -} define i32 @test_select(ptr %a) sanitize_hwaddress { entry: @@ -367,11 +325,11 @@ entry: ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_select %x = call ptr @getptr(ptr %a) %buf.sroa.0 = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) %c = call i1 @cond() %ptr = select i1 %c, ptr %x, ptr %buf.sroa.0 store volatile i8 0, ptr %ptr, align 4, !tbaa !8 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) ret i32 0 } @@ -388,10 +346,10 @@ entry: ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_retptr ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_retptr %buf.sroa.0 = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0) %ptr = call ptr @retptr(ptr %buf.sroa.0) store volatile i8 0, ptr %ptr, align 4, !tbaa !8 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0) + call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0) ret i32 0 } @@ -408,17 +366,17 @@ entry: ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_lifetime_poison ; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_lifetime_poison %buf.sroa.0 = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 1, ptr poison) + call void @llvm.lifetime.start.p0(ptr poison) store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8 - call void @llvm.lifetime.end.p0(i64 1, ptr poison) + call void @llvm.lifetime.end.p0(ptr poison) ret i32 0 } ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @llvm.memset.p0.i32(ptr, i8, i32, i1) declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1) diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll index 57d37ca..af6411a 100644 --- 
a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll +++ b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll @@ -79,13 +79,13 @@ sw.bb1: ; preds = %entry br label %return while.body: ; preds = %entry - call void @llvm.lifetime.start.p0(i64 4096, ptr nonnull %buf) #10 + call void @llvm.lifetime.start.p0(ptr nonnull %buf) #10 store ptr %buf, ptr @stackbuf, align 8 ; may_jump may call longjmp, going back to the switch (and then the return), ; bypassing the lifetime.end. This is why we need to untag on the return, ; rather than the lifetime.end. call void @may_jump() - call void @llvm.lifetime.end.p0(i64 4096, ptr nonnull %buf) #10 + call void @llvm.lifetime.end.p0(ptr nonnull %buf) #10 br label %return return: ; preds = %entry, %while.body, %sw.bb1 @@ -95,5 +95,5 @@ return: ; preds = %entry, %while.body, declare i32 @setjmp(ptr noundef) returns_twice -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll index e30b518..cfded02 100644 --- a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll +++ b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll @@ -26,13 +26,13 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress ; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr ; X86-SCOPE-NEXT: br label [[TMP11:%.*]] ; X86-SCOPE: 11: -; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]]) +; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]]) ; X86-SCOPE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP6]] to i8 ; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP12]], i64 16) ; X86-SCOPE-NEXT: [[TMP13:%.*]] = tail call i1 (...) 
@cond() ; X86-SCOPE-NEXT: [[TMP14:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8 ; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP14]], i64 16) -; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]]) +; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]]) ; X86-SCOPE-NEXT: br i1 [[TMP13]], label [[TMP15:%.*]], label [[TMP11]] ; X86-SCOPE: 15: ; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]]) @@ -96,7 +96,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress ; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr ; AARCH64-SCOPE-NEXT: br label [[TMP25:%.*]] ; AARCH64-SCOPE: 25: -; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]]) ; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8 ; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64 ; AARCH64-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935 @@ -110,7 +110,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress ; AARCH64-SCOPE-NEXT: [[TMP35:%.*]] = lshr i64 [[TMP34]], 4 ; AARCH64-SCOPE-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP35]] ; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP36]], i8 [[TMP32]], i64 1, i1 false) -; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]]) ; AARCH64-SCOPE-NEXT: br i1 [[TMP31]], label [[TMP37:%.*]], label [[TMP25]] ; AARCH64-SCOPE: 37: ; AARCH64-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]]) @@ -198,7 +198,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress ; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr ; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP25:%.*]] ; AARCH64-SHORT-SCOPE: 25: -; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]]) ; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935 @@ -215,7 +215,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress ; AARCH64-SHORT-SCOPE-NEXT: [[TMP37:%.*]] = lshr i64 [[TMP36]], 4 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP37]] ; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP38]], i8 [[TMP34]], i64 1, i1 false) -; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]]) ; AARCH64-SHORT-SCOPE-NEXT: br i1 [[TMP33]], label [[TMP39:%.*]], label [[TMP25]] ; AARCH64-SHORT-SCOPE: 39: ; AARCH64-SHORT-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]]) @@ -279,10 +279,10 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress 2: ; preds = %2, %0 ; We should tag the memory after the br (in the loop). - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.start.p0(ptr nonnull %1) %3 = tail call i1 (...) 
@cond() #2 ; We should tag the memory before the next br (before the jump back). - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.end.p0(ptr nonnull %1) br i1 %3, label %4, label %2 4: ; preds = %2 @@ -307,13 +307,13 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi ; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr ; X86-SCOPE-NEXT: br label [[TMP11:%.*]] ; X86-SCOPE: 11: -; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]]) +; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]]) ; X86-SCOPE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP6]] to i8 ; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP12]], i64 16) ; X86-SCOPE-NEXT: [[TMP13:%.*]] = tail call i1 (...) @cond() ; X86-SCOPE-NEXT: [[TMP14:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8 ; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP14]], i64 16) -; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]]) +; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]]) ; X86-SCOPE-NEXT: br i1 [[TMP13]], label [[TMP15:%.*]], label [[TMP11]] ; X86-SCOPE: 15: ; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]]) @@ -377,7 +377,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi ; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr ; AARCH64-SCOPE-NEXT: br label [[TMP25:%.*]] ; AARCH64-SCOPE: 25: -; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]]) ; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8 ; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64 ; AARCH64-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935 @@ -391,7 +391,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi ; AARCH64-SCOPE-NEXT: [[TMP35:%.*]] = lshr i64 [[TMP34]], 4 ; AARCH64-SCOPE-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP35]] ; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP36]], i8 [[TMP32]], i64 1, i1 false) -; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]]) ; AARCH64-SCOPE-NEXT: br i1 [[TMP31]], label [[TMP37:%.*]], label [[TMP25]] ; AARCH64-SCOPE: 37: ; AARCH64-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]]) @@ -479,7 +479,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi ; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr ; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP25:%.*]] ; AARCH64-SHORT-SCOPE: 25: -; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]]) ; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935 @@ -496,7 +496,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi ; AARCH64-SHORT-SCOPE-NEXT: [[TMP37:%.*]] = lshr i64 [[TMP36]], 4 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP37]] ; AARCH64-SHORT-SCOPE-NEXT: 
call void @llvm.memset.p0.i64(ptr align 1 [[TMP38]], i8 [[TMP34]], i64 1, i1 false) -; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]]) ; AARCH64-SHORT-SCOPE-NEXT: br i1 [[TMP33]], label [[TMP39:%.*]], label [[TMP25]] ; AARCH64-SHORT-SCOPE: 39: ; AARCH64-SHORT-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]]) @@ -560,10 +560,10 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi 2: ; preds = %2, %0 ; We should tag the memory after the br (in the loop). - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.start.p0(ptr nonnull %1) %3 = tail call i1 (...) @cond() #2 ; We should tag the memory before the next br (before the jump back). - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.end.p0(ptr nonnull %1) br i1 %3, label %4, label %2 4: ; preds = %2 @@ -809,12 +809,12 @@ define dso_local i32 @multiple_lifetimes() local_unnamed_addr sanitize_hwaddress %1 = alloca i8, align 1 ; We erase lifetime markers if we insert instrumentation outside of the ; lifetime. - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.start.p0(ptr nonnull %1) call void @use(ptr nonnull %1) #2 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1) - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.end.p0(ptr nonnull %1) + call void @llvm.lifetime.start.p0(ptr nonnull %1) call void @use(ptr nonnull %1) #2 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.end.p0(ptr nonnull %1) ret i32 0 } @@ -833,7 +833,7 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress { ; X86-SCOPE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP6]], 57 ; X86-SCOPE-NEXT: [[TMP10:%.*]] = or i64 [[TMP8]], [[TMP9]] ; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr -; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]]) +; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]]) ; X86-SCOPE-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP6]] to i8 ; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP11]], i64 16) ; X86-SCOPE-NEXT: [[TMP12:%.*]] = tail call i1 (...) 
@cond() @@ -906,7 +906,7 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress { ; AARCH64-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56 ; AARCH64-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]] ; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr -; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]]) ; AARCH64-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8 ; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64 ; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935 @@ -1019,7 +1019,7 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress { ; AARCH64-SHORT-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]] ; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr -; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]]) ; AARCH64-SHORT-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935 @@ -1109,13 +1109,13 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress { ; AARCH64-SHORT-NOSCOPE-NEXT: ret i32 0 ; %1 = alloca i8, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.start.p0(ptr nonnull %1) %2 = tail call i1 (...) @cond() #2 br i1 %2, label %3, label %4 3: call void @use(ptr nonnull %1) #2 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.end.p0(ptr nonnull %1) ret i32 0 4: @@ -1137,7 +1137,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress { ; X86-SCOPE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP6]], 57 ; X86-SCOPE-NEXT: [[TMP10:%.*]] = or i64 [[TMP8]], [[TMP9]] ; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr -; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]]) +; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]]) ; X86-SCOPE-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP6]] to i8 ; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP11]], i64 16) ; X86-SCOPE-NEXT: [[TMP12:%.*]] = tail call i1 (...) 
@cond() @@ -1146,12 +1146,12 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress { ; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]]) ; X86-SCOPE-NEXT: [[TMP14:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8 ; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP14]], i64 16) -; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]]) +; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]]) ; X86-SCOPE-NEXT: br label [[TMP17:%.*]] ; X86-SCOPE: 15: ; X86-SCOPE-NEXT: [[TMP16:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8 ; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP16]], i64 16) -; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]]) +; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]]) ; X86-SCOPE-NEXT: br label [[TMP17]] ; X86-SCOPE: 17: ; X86-SCOPE-NEXT: ret i32 0 @@ -1214,7 +1214,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress { ; AARCH64-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56 ; AARCH64-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]] ; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr -; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]]) ; AARCH64-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8 ; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64 ; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935 @@ -1231,7 +1231,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress { ; AARCH64-SCOPE-NEXT: [[TMP35:%.*]] = lshr i64 [[TMP34]], 4 ; AARCH64-SCOPE-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP35]] ; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP36]], i8 [[TMP32]], i64 1, i1 false) -; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]]) ; AARCH64-SCOPE-NEXT: br label [[TMP43:%.*]] ; AARCH64-SCOPE: 37: ; AARCH64-SCOPE-NEXT: [[TMP38:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8 @@ -1240,7 +1240,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress { ; AARCH64-SCOPE-NEXT: [[TMP41:%.*]] = lshr i64 [[TMP40]], 4 ; AARCH64-SCOPE-NEXT: [[TMP42:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP41]] ; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP42]], i8 [[TMP38]], i64 1, i1 false) -; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]]) ; AARCH64-SCOPE-NEXT: br label [[TMP43]] ; AARCH64-SCOPE: 43: ; AARCH64-SCOPE-NEXT: ret i32 0 @@ -1327,7 +1327,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress { ; AARCH64-SHORT-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]] ; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr -; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]]) ; AARCH64-SHORT-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64 ; 
AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935 @@ -1347,7 +1347,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress { ; AARCH64-SHORT-SCOPE-NEXT: [[TMP37:%.*]] = lshr i64 [[TMP36]], 4 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP37]] ; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP38]], i8 [[TMP34]], i64 1, i1 false) -; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]]) ; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP45:%.*]] ; AARCH64-SHORT-SCOPE: 39: ; AARCH64-SHORT-SCOPE-NEXT: [[TMP40:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8 @@ -1356,7 +1356,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress { ; AARCH64-SHORT-SCOPE-NEXT: [[TMP43:%.*]] = lshr i64 [[TMP42]], 4 ; AARCH64-SHORT-SCOPE-NEXT: [[TMP44:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP43]] ; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP44]], i8 [[TMP40]], i64 1, i1 false) -; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]]) +; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]]) ; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP45]] ; AARCH64-SHORT-SCOPE: 45: ; AARCH64-SHORT-SCOPE-NEXT: ret i32 0 @@ -1417,17 +1417,17 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress { ; AARCH64-SHORT-NOSCOPE-NEXT: ret i32 0 ; %1 = alloca i8, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.start.p0(ptr nonnull %1) %2 = tail call i1 (...) @cond() #2 br i1 %2, label %3, label %4 3: call void @use(ptr nonnull %1) #2 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.end.p0(ptr nonnull %1) br label %5 4: - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1) + call void @llvm.lifetime.end.p0(ptr nonnull %1) br label %5 5: @@ -1439,7 +1439,7 @@ declare dso_local i1 @cond(...) local_unnamed_addr declare dso_local void @use(ptr) local_unnamed_addr ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll index 2189424..b64dfbf 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll @@ -7,10 +7,10 @@ target triple = "aarch64-unknown-linux-gnu" define i32 @foo(i32 %guard, ...) { %vl = alloca %struct.__va_list, align 8 - call void @llvm.lifetime.start.p0(i64 32, ptr %vl) + call void @llvm.lifetime.start.p0(ptr %vl) call void @llvm.va_start(ptr %vl) call void @llvm.va_end(ptr %vl) - call void @llvm.lifetime.end.p0(i64 32, ptr %vl) + call void @llvm.lifetime.end.p0(ptr %vl) ret i32 0 } @@ -45,7 +45,7 @@ define i32 @foo(i32 %guard, ...) 
{ ; CHECK: [[STACK:%.*]] = getelementptr inbounds i8, ptr {{%.*}}, i32 192 ; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 16 {{%.*}}, ptr align 16 [[STACK]], i64 {{%.*}}, i1 false) -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @llvm.va_start(ptr) #2 declare void @llvm.va_end(ptr) #2 -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll index 0bd0968..f3cceb7c 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll @@ -7,10 +7,10 @@ target triple = "aarch64-unknown-linux-gnu" define i32 @foo(i32 %guard, ...) { %vl = alloca %struct.__va_list, align 8 - call void @llvm.lifetime.start.p0(i64 32, ptr %vl) + call void @llvm.lifetime.start.p0(ptr %vl) call void @llvm.va_start(ptr %vl) call void @llvm.va_end(ptr %vl) - call void @llvm.lifetime.end.p0(i64 32, ptr %vl) + call void @llvm.lifetime.end.p0(ptr %vl) ret i32 0 } @@ -45,10 +45,10 @@ define i32 @foo(i32 %guard, ...) { ; CHECK: [[STACK:%.*]] = getelementptr inbounds i8, ptr {{%.*}}, i32 192 ; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 16 {{%.*}}, ptr align 16 [[STACK]], i64 {{%.*}}, i1 false) -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @llvm.va_start(ptr) #2 declare void @llvm.va_end(ptr) #2 -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 define i32 @bar() { %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i32 2, double 3.000000e+00, diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll index 9133b32..06a34ac 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll @@ -749,7 +749,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -809,26 +809,26 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca %"struct.std::__va_list", align 8 - call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #5 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void 
@llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #5 ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3 +declare void @llvm.lifetime.start.p0(ptr nocapture) #3 declare void @llvm.va_start(ptr) #4 declare void @llvm.va_end(ptr) #4 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3 +declare void @llvm.lifetime.end.p0(ptr nocapture) #3 define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef %n, ...) sanitize_memory { ; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz( @@ -842,7 +842,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -902,16 +902,16 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca %"struct.std::__va_list", align 8 - call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #5 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #5 ret void } @@ -927,7 +927,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -987,16 +987,16 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca %"struct.std::__va_list", align 8 - call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #5 call void 
@llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #5 ret void } @@ -1012,7 +1012,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -1072,16 +1072,16 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca %"struct.std::__va_list", align 8 - call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #5 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #5 ret void } @@ -1097,7 +1097,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -1157,16 +1157,16 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca %"struct.std::__va_list", align 8 - call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #5 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #5 ret void } @@ -1182,7 +1182,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr 
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1242,16 +1242,16 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1267,7 +1267,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1327,16 +1327,16 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1352,7 +1352,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1412,16 +1412,16 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1437,7 +1437,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1497,16 +1497,16 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1522,7 +1522,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1582,16 +1582,16 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1607,7 +1607,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1667,16 +1667,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1692,7 +1692,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1752,16 +1752,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
index 52f4901..e05018c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
index 23df3fc..e6d3a4b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
@@ -4,10 +4,10 @@
target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "loongarch64-unknown-linux-gnu"
;; First, check allocation of the save area.
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @foo(i32 %guard, ...) {
; CHECK-LABEL: @foo
; CHECK: [[TMP1:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
@@ -17,10 +17,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP4]], i1 false)
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
index 64a76c5..69a74a3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
@@ -5,10 +5,10 @@ target triple = "mips64--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
index 9f3127e..b19da8e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
@@ -5,10 +5,10 @@ target triple = "mips64el--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
index 05a88f0..4d47b02 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
index 971b25f..98294e7 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
index 45e8b2d..9351067 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
@@ -5,10 +5,10 @@ target triple = "powerpc64--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
index d6b956c..4151f3b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
@@ -5,10 +5,10 @@ target triple = "powerpc64le--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
index 246db9d..29d1fbd 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[TMP4]], 2147483647
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i32 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 [[TMP6]], i8 0, i32 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i32
; CHECK-NEXT: [[TMP8:%.*]] = and i32 [[TMP7]], 2147483647
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i32 [[TMP8]] to ptr
@@ -50,15 +50,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP31:%.*]] = inttoptr i32 [[TMP30]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP17]], ptr align 4 [[TMP31]], i32 [[TMP21]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -67,10 +67,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
index 4a7b7b2..a4d2e16 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[TMP4]], 2147483647
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i32 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 [[TMP6]], i8 0, i32 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i32
; CHECK-NEXT: [[TMP8:%.*]] = and i32 [[TMP7]], 2147483647
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i32 [[TMP8]] to ptr
@@ -50,15 +50,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP31:%.*]] = inttoptr i32 [[TMP30]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP17]], ptr align 4 [[TMP31]], i32 [[TMP21]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -67,10 +67,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
index 50e7be1..0c6e75c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
index e0b5907..c340d15 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
@@ -4,17 +4,17 @@
target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"
target triple = "s390x-unknown-linux-gnu"
%struct.__va_list = type { i64, i64, ptr, ptr }
-declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
declare void @llvm.va_start(ptr)
declare void @llvm.va_end(ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(ptr)
define i64 @foo(i64 %guard, ...) #1 {
%vl = alloca %struct.__va_list
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i64 0
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
index 009aef9..91b21ea 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
@@ -7,10 +7,10 @@ target triple = "s390x-unknown-linux-gnu"
define i64 @foo(i64 %guard, ...) {
%vl = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i64 0
}
@@ -28,10 +28,10 @@ define i64 @foo(i64 %guard, ...) {
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 {{%.*}}, i64 160, i1 false)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 {{%.*}}, i64 [[A]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare i32 @random_i32()
declare i64 @random_i64()
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
index 7a3f0dd..b61cb6a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
@@ -29,7 +29,7 @@ entry:
define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #2
call void @llvm.va_start(ptr nonnull %args)
%cmp9 = icmp sgt i32 %n, 0
br i1 %cmp9, label %for.body.lr.ph, label %for.end
@@ -85,13 +85,13 @@ vaarg.end: ; preds = %vaarg.in_mem, %vaar
for.end: ; preds = %vaarg.end, %entry
%sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %vaarg.end ]
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #2
ret i32 %sum.0.lcssa
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nounwind
declare void @llvm.va_start(ptr) #2
@@ -100,7 +100,7 @@ declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare dso_local i80 @sum_i80(i32, ...) local_unnamed_addr
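Every hunk in this patch makes the same mechanical substitution: llvm.lifetime.start and llvm.lifetime.end lose their leading i64 size operand, and the size of the marked region is instead derived from the alloca that the pointer argument refers to. As a minimal sketch of a standalone module in the new form (the function name @use_buffer and the 32-byte buffer are illustrative, not taken from any file in this patch):

declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)

define void @use_buffer() {
  ; The 32-byte extent is implied by the alloca; no size operand is passed.
  %buf = alloca [32 x i8], align 8
  call void @llvm.lifetime.start.p0(ptr %buf)
  call void @llvm.lifetime.end.p0(ptr %buf)
  ret void
}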
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
index 2051015..4bc14da 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
@@ -551,7 +551,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -581,26 +581,26 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
declare void @llvm.va_start(ptr) #5
declare void @llvm.va_end(ptr) #5
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef %n, ...) sanitize_memory {
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz(
@@ -614,7 +614,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -644,16 +644,16 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -669,7 +669,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -699,16 +699,16 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -724,7 +724,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -754,16 +754,16 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -779,7 +779,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -809,16 +809,16 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -834,7 +834,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -864,16 +864,16 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -889,7 +889,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -919,16 +919,16 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -944,7 +944,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -974,16 +974,16 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -999,7 +999,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1029,16 +1029,16 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1054,7 +1054,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1084,16 +1084,16 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1109,7 +1109,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1139,16 +1139,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1164,7 +1164,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1194,16 +1194,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/alloca.ll b/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
index 40ade5f..c05702b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
@@ -125,12 +125,12 @@ entry:
br label %another_bb
another_bb:
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 7, ptr %x
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 8, ptr %x
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -158,8 +158,10 @@ another_bb:
define void @lifetime_start_var(i64 %cnt) sanitize_memory {
entry:
%x = alloca i32, i64 %cnt, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -176,5 +178,5 @@ entry:
; CHECK: call void @llvm.lifetime.end
; CHECK: ret void
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
index b27ef5d..2745939 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
@@ -93,7 +93,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
@@ -165,7 +165,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD]], %[[VAARG_END]] ]
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
;
@@ -186,7 +186,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4
; ORIGIN-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP7]], i8 0, i64 24, i1 false)
-; ORIGIN-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; ORIGIN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; ORIGIN-NEXT: [[TMP23:%.*]] = ptrtoint ptr [[ARGS]] to i64
; ORIGIN-NEXT: [[TMP11:%.*]] = and i64 [[TMP23]], -2147483649
; ORIGIN-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -266,7 +266,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
local_unnamed_addr #0 { ; ORIGIN: [[FOR_END]]: ; ORIGIN-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD]], %[[VAARG_END]] ] ; ORIGIN-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; ORIGIN-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; ORIGIN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; ORIGIN-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; ORIGIN-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4 ; ORIGIN-NEXT: ret i32 [[SUM_0_LCSSA]] @@ -288,7 +288,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 { ; ORIGIN2-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4 ; ORIGIN2-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr ; ORIGIN2-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP7]], i8 0, i64 24, i1 false) -; ORIGIN2-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; ORIGIN2-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; ORIGIN2-NEXT: [[TMP23:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; ORIGIN2-NEXT: [[TMP11:%.*]] = and i64 [[TMP23]], -2147483649 ; ORIGIN2-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr @@ -368,14 +368,14 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 { ; ORIGIN2: [[FOR_END]]: ; ORIGIN2-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD]], %[[VAARG_END]] ] ; ORIGIN2-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; ORIGIN2-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; ORIGIN2-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; ORIGIN2-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; ORIGIN2-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4 ; ORIGIN2-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #2 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #2 call void @llvm.va_start(ptr nonnull %args) %cmp9 = icmp sgt i32 %n, 0 br i1 %cmp9, label %for.body.lr.ph, label %for.end @@ -419,13 +419,13 @@ vaarg.end: ; preds = %vaarg.in_mem, %vaar for.end: ; preds = %vaarg.end, %entry %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %vaarg.end ] call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #2 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #2 ret i32 %sum.0.lcssa } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 ; Function Attrs: nounwind declare void @llvm.va_start(ptr) #2 @@ -434,7 +434,7 @@ declare void @llvm.va_start(ptr) #2 declare void @llvm.va_end(ptr) #2 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 declare dso_local i80 @sum_i80(i32, ...) 
local_unnamed_addr diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll index aedefca..74a6276 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll @@ -562,7 +562,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -582,26 +582,26 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32 ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4 +declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.va_start(ptr) #5 declare void @llvm.va_end(ptr) #5 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #4 +declare void @llvm.lifetime.end.p0(ptr nocapture) #4 define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef %n, ...) 
sanitize_memory { ; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz( @@ -614,7 +614,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -634,16 +634,16 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } @@ -658,7 +658,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -678,16 +678,16 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } @@ -702,7 +702,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x 
%struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -722,16 +722,16 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } @@ -746,7 +746,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -766,16 +766,16 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } @@ -790,7 +790,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -810,16 +810,16 @@ define linkonce_odr 
dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } @@ -834,7 +834,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0, ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -854,16 +854,16 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0, ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } @@ -878,7 +878,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -898,16 +898,16 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret 
void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } @@ -922,7 +922,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -942,16 +942,16 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } @@ -966,7 +966,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -986,16 +986,16 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void 
@llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } @@ -1010,7 +1010,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -1030,16 +1030,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } @@ -1054,7 +1054,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr @@ -1074,16 +1074,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]]) ; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]]) ; CHECK-NEXT: ret void ; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %args) #6 call void @llvm.va_start(ptr nonnull %args) call void @_Z3usePv(ptr noundef nonnull %args) call void @llvm.va_end(ptr nonnull %args) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %args) #6 ret void } diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll index f07f3ad..04fdd23 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll @@ -664,8 +664,8 @@ entry: declare i32 
@NoSanitizeMemoryUndefHelper(i32 %x) -declare void @llvm.lifetime.start.p0(i64 immarg %0, ptr nocapture %1) -declare void @llvm.lifetime.end.p0(i64 immarg %0, ptr nocapture %1) +declare void @llvm.lifetime.start.p0(ptr nocapture %1) +declare void @llvm.lifetime.end.p0(ptr nocapture %1) declare void @foo8(ptr nocapture) @@ -674,7 +674,7 @@ define void @msan() sanitize_memory { ; CHECK-NEXT: entry: ; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] ; CHECK-NEXT: [[TEXT:%.*]] = alloca i8, align 1, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TEXT]]), !dbg [[DBG7]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TEXT]]), !dbg [[DBG7]] ; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[TEXT]] to i64, !dbg [[DBG7]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG7]] ; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG7]] @@ -685,13 +685,13 @@ define void @msan() sanitize_memory { ; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[TEXT]], i64 1, ptr @[[GLOB6:[0-9]+]], ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG7]] ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG8]] ; CHECK-NEXT: call void @foo8(ptr [[TEXT]]), !dbg [[DBG8]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TEXT]]), !dbg +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TEXT]]), !dbg ; CHECK-NEXT: ret void, !dbg ; entry: %text = alloca i8, align 1, !dbg !10 - call void @llvm.lifetime.start.p0(i64 1, ptr %text), !dbg !11 + call void @llvm.lifetime.start.p0(ptr %text), !dbg !11 call void @foo8(ptr %text), !dbg !12 - call void @llvm.lifetime.end.p0(i64 1, ptr %text), !dbg !13 + call void @llvm.lifetime.end.p0(ptr %text), !dbg !13 ret void, !dbg !14 } diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll index 2cc8fd6..5779367 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll @@ -12,14 +12,14 @@ target triple = "x86_64-unknown-linux-gnu" define dso_local ptr @_Z1fv() local_unnamed_addr #0 { entry: %p = alloca ptr, align 8 - call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %p) + call void @llvm.lifetime.start.p0(ptr nonnull %p) %0 = load i8, ptr @flag, align 1 %tobool = icmp ne i8 %0, 0 %call = call zeroext i1 @_Z2f1PPvb(ptr nonnull %p, i1 zeroext %tobool) %1 = load ptr, ptr %p, align 8 %2 = call ptr @llvm.launder.invariant.group.p0(ptr %1) %retval.0 = select i1 %call, ptr %2, ptr null - call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %p) + call void @llvm.lifetime.end.p0(ptr nonnull %p) ret ptr %retval.0 } @@ -29,8 +29,8 @@ declare dso_local zeroext i1 @_Z2f1PPvb(ptr, i1 zeroext) local_unnamed_addr declare ptr @llvm.launder.invariant.group.p0(ptr) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) attributes #0 = { sanitize_memory uwtable } diff --git a/llvm/test/Instrumentation/TypeSanitizer/alloca.ll b/llvm/test/Instrumentation/TypeSanitizer/alloca.ll index fc72631..deddecf 100644 --- a/llvm/test/Instrumentation/TypeSanitizer/alloca.ll +++ b/llvm/test/Instrumentation/TypeSanitizer/alloca.ll @@ -48,7 +48,7 @@ define void @alloca_lifetime_test(i1 %c) sanitize_type { ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], 
[[SHADOW_BASE]] ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 80, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 10, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) ; CHECK-NEXT: call void @alloca_test_use(ptr [[X]]) ; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[X]] to i64 ; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP10]], [[APP_MEM_MASK]] @@ -56,7 +56,7 @@ define void @alloca_lifetime_test(i1 %c) sanitize_type { ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], [[SHADOW_BASE]] ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP14]], i8 0, i64 80, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 10, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) ; CHECK-NEXT: br i1 [[C:%.*]], label [[LOOP]], label [[EXIT:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void @@ -66,9 +66,9 @@ entry: br label %loop loop: - call void @llvm.lifetime.start.p0(i64 10, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) call void @alloca_test_use(ptr %x) - call void @llvm.lifetime.end.p0(i64 10, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) br i1 %c, label %loop, label %exit exit: @@ -99,7 +99,7 @@ define void @dynamic_alloca_lifetime_test(i1 %c, i64 %n) sanitize_type { ; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr ; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[TMP7]], 3 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP12]], i8 0, i64 [[TMP13]], i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) ; CHECK-NEXT: call void @alloca_test_use(ptr [[X]]) ; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[N]], 4 ; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[X]] to i64 @@ -109,7 +109,7 @@ define void @dynamic_alloca_lifetime_test(i1 %c, i64 %n) sanitize_type { ; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr ; CHECK-NEXT: [[TMP20:%.*]] = shl i64 [[TMP14]], 3 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP19]], i8 0, i64 [[TMP20]], i1 false) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) ; CHECK-NEXT: br i1 [[C:%.*]], label [[LOOP]], label [[EXIT:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void @@ -119,9 +119,9 @@ entry: br label %loop loop: - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) call void @alloca_test_use(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) br i1 %c, label %loop, label %exit exit: diff --git a/llvm/test/TableGen/intrinsic-attrs.td b/llvm/test/TableGen/intrinsic-attrs.td index 92a90dc..bcded0cd2 100644 --- a/llvm/test/TableGen/intrinsic-attrs.td +++ b/llvm/test/TableGen/intrinsic-attrs.td @@ -27,14 +27,16 @@ def int_deref_ptr_ret : Intrinsic<[llvm_ptr_ty], [], [Dereferenceable<RetIndex, // CHECK: static constexpr uint16_t IntrinsicsToAttributesMap[] = { // CHECK: 0 << 8 | 0, // llvm.deref.ptr.ret // CHECK: 1 << 8 | 1, // llvm.random.gen +// CHECK: }; // IntrinsicsToAttributesMap + +// CHECK: static constexpr ArgNoAttrIDPair ArgAttrIdTable[] = { +// CHECK-NEXT: {0, 0}, +// CHECK: }; // ArgAttrIdTable + +// CHECK: static constexpr ArgAttributesInfo ArgAttributesInfoTable[] = { +// CHECK-NEXT: {0, 1}, +// CHECK-NEXT: {0, 0}, +// CHECK-NEXT: }; // ArgAttributesInfoTable // CHECK: getAttributes(LLVMContext 
&C, ID id, // CHECK-NEXT: FunctionType *FT) { -// CHECK: case 1: -// CHECK-NEXT: HasFnAttr = true; -// CHECK-NEXT: break; -// CHECK-NEXT: case 0: -// CHECK-NEXT: AS[0] = {0, getIntrinsicArgAttributeSet(C, 0, FT->getContainedType(0))}; -// CHECK-NEXT: HasFnAttr = true; -// CHECK-NEXT: NumAttrs = 1 -// CHECK-NEXT: break; diff --git a/llvm/test/Transforms/AddDiscriminators/call.ll b/llvm/test/Transforms/AddDiscriminators/call.ll index d093c65..93d3aa4 100644 --- a/llvm/test/Transforms/AddDiscriminators/call.ll +++ b/llvm/test/Transforms/AddDiscriminators/call.ll @@ -12,8 +12,8 @@ define void @_Z3foov() #0 !dbg !4 { call void @_Z3barv(), !dbg !10 ; CHECK: call void @_Z3barv(), !dbg ![[CALL0:[0-9]+]] %a = alloca [100 x i8], align 16 - call void @llvm.lifetime.start.p0(i64 100, ptr %a), !dbg !11 - call void @llvm.lifetime.end.p0(i64 100, ptr %a), !dbg !11 + call void @llvm.lifetime.start.p0(ptr %a), !dbg !11 + call void @llvm.lifetime.end.p0(ptr %a), !dbg !11 call void @_Z3barv(), !dbg !11 ; CHECK: call void @_Z3barv(), !dbg ![[CALL1:[0-9]+]] call void @_Z3barv(), !dbg !12 @@ -22,8 +22,8 @@ define void @_Z3foov() #0 !dbg !4 { } declare void @_Z3barv() #1 -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind argmemonly -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind argmemonly +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll index cc51a00d..9bf8a51 100644 --- a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll +++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll @@ -31,11 +31,11 @@ define i1 @test_cmpxchg_seq_cst(ptr %addr, i128 %desire, i128 %new) { ; PWR7-LABEL: @test_cmpxchg_seq_cst( ; PWR7-NEXT: entry: ; PWR7-NEXT: [[TMP0:%.*]] = alloca i128, align 16 -; PWR7-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP0]]) +; PWR7-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP0]]) ; PWR7-NEXT: store i128 [[DESIRE:%.*]], ptr [[TMP0]], align 16 ; PWR7-NEXT: [[TMP1:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr [[ADDR:%.*]], ptr [[TMP0]], i128 [[NEW:%.*]], i32 5, i32 5) ; PWR7-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 16 -; PWR7-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP0]]) +; PWR7-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP0]]) ; PWR7-NEXT: [[TMP3:%.*]] = insertvalue { i128, i1 } poison, i128 [[TMP2]], 0 ; PWR7-NEXT: [[TMP4:%.*]] = insertvalue { i128, i1 } [[TMP3]], i1 [[TMP1]], 1 ; PWR7-NEXT: [[SUCC:%.*]] = extractvalue { i128, i1 } [[TMP4]], 1 diff --git a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll index 2cbb179..60fb248 100644 --- a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll +++ 
b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll @@ -9,12 +9,12 @@ define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) { ; CHECK: atomicrmw.start: ; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] ; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store float [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[PTR]], ptr [[TMP1]], i32 [[TMP3]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { float, i1 } poison, float [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { float, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { float, i1 } [[TMP7]], 1 @@ -35,12 +35,12 @@ define float @test_atomicrmw_fsub_f32(ptr %ptr, float %value) { ; CHECK: atomicrmw.start: ; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] ; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store float [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[PTR]], ptr [[TMP1]], i32 [[TMP3]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { float, i1 } poison, float [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { float, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { float, i1 } [[TMP7]], 1 diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll index 682c1e6..1d6a32c 100644 --- a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll +++ b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll @@ -38,11 +38,11 @@ define i16 @test_exchange_i16(ptr %arg, i16 %val) { ; CHECK-LABEL: @test_cmpxchg_i16( ; CHECK: %1 = alloca i16, align 2 -; CHECK: call void @llvm.lifetime.start.p0(i64 2, ptr %1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %1) ; CHECK: store i16 %old, ptr %1, align 2 ; CHECK: %2 = call zeroext i1 @__atomic_compare_exchange_2(ptr %arg, ptr %1, i16 %new, i32 5, i32 0) ; CHECK: %3 = load i16, ptr %1, align 2 -; CHECK: call void @llvm.lifetime.end.p0(i64 2, ptr %1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %1) ; CHECK: %4 = insertvalue { i16, i1 } poison, i16 %3, 0 ; CHECK: %5 = insertvalue { i16, i1 } %4, i1 %2, 1 ; CHECK: %ret = extractvalue { i16, i1 } %5, 0 @@ -68,10 +68,10 @@ define i16 @test_add_i16(ptr %arg, i16 %val) { ; CHECK-LABEL: @test_load_i128( ; CHECK: %1 = alloca i128, align 8 -; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %1) ; CHECK: call void @__atomic_load(i32 16, ptr %arg, ptr %1, i32 5) ; CHECK: %2 = load i128, ptr %1, align 8 -; 
CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %1) ; CHECK: ret i128 %2 define i128 @test_load_i128(ptr %arg) { %ret = load atomic i128, ptr %arg seq_cst, align 16 @@ -80,10 +80,10 @@ define i128 @test_load_i128(ptr %arg) { ; CHECK-LABEL: @test_store_i128( ; CHECK: %1 = alloca i128, align 8 -; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %1) ; CHECK: store i128 %val, ptr %1, align 8 ; CHECK: call void @__atomic_store(i32 16, ptr %arg, ptr %1, i32 5) -; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %1) ; CHECK: ret void define void @test_store_i128(ptr %arg, i128 %val) { store atomic i128 %val, ptr %arg seq_cst, align 16 @@ -92,14 +92,14 @@ define void @test_store_i128(ptr %arg, i128 %val) { ; CHECK-LABEL: @test_exchange_i128( ; CHECK: %1 = alloca i128, align 8 -; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %1) ; CHECK: store i128 %val, ptr %1, align 8 ; CHECK: %2 = alloca i128, align 8 -; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2) +; CHECK: call void @llvm.lifetime.start.p0(ptr %2) ; CHECK: call void @__atomic_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5) -; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %1) ; CHECK: %3 = load i128, ptr %2, align 8 -; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2) +; CHECK: call void @llvm.lifetime.end.p0(ptr %2) ; CHECK: ret i128 %3 define i128 @test_exchange_i128(ptr %arg, i128 %val) { %ret = atomicrmw xchg ptr %arg, i128 %val seq_cst @@ -108,15 +108,15 @@ define i128 @test_exchange_i128(ptr %arg, i128 %val) { ; CHECK-LABEL: @test_cmpxchg_i128( ; CHECK: %1 = alloca i128, align 8 -; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %1) ; CHECK: store i128 %old, ptr %1, align 8 ; CHECK: %2 = alloca i128, align 8 -; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2) +; CHECK: call void @llvm.lifetime.start.p0(ptr %2) ; CHECK: store i128 %new, ptr %2, align 8 ; CHECK: %3 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 0) -; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2) +; CHECK: call void @llvm.lifetime.end.p0(ptr %2) ; CHECK: %4 = load i128, ptr %1, align 8 -; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %1) ; CHECK: %5 = insertvalue { i128, i1 } poison, i128 %4, 0 ; CHECK: %6 = insertvalue { i128, i1 } %5, i1 %3, 1 ; CHECK: %ret = extractvalue { i128, i1 } %6, 0 @@ -139,14 +139,14 @@ define i128 @test_cmpxchg_i128(ptr %arg, i128 %old, i128 %new) { ; CHECK:atomicrmw.start: ; CHECK: %loaded = phi i128 [ %3, %0 ], [ %newloaded, %atomicrmw.start ] ; CHECK: %new = add i128 %loaded, %val -; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %1) ; CHECK: store i128 %loaded, ptr %1, align 8 -; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2) +; CHECK: call void @llvm.lifetime.start.p0(ptr %2) ; CHECK: store i128 %new, ptr %2, align 8 ; CHECK: %4 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 5) -; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2) +; CHECK: call void @llvm.lifetime.end.p0(ptr %2) ; CHECK: %5 = load i128, ptr %1, align 8 -; CHECK: call void @llvm.lifetime.end.p0(i64 16, 
ptr %1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %1) ; CHECK: %6 = insertvalue { i128, i1 } poison, i128 %5, 0 ; CHECK: %7 = insertvalue { i128, i1 } %6, i1 %4, 1 ; CHECK: %success = extractvalue { i128, i1 } %7, 1 @@ -181,12 +181,12 @@ define void @test_store_double(ptr %arg, double %val) { ; CHECK-LABEL: @test_cmpxchg_ptr( ; CHECK: %1 = alloca ptr, align 4 -; CHECK: call void @llvm.lifetime.start.p0(i64 4, ptr %1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %1) ; CHECK: store ptr %old, ptr %1, align 4 ; CHECK: %2 = ptrtoint ptr %new to i32 ; CHECK: %3 = call zeroext i1 @__atomic_compare_exchange_4(ptr %arg, ptr %1, i32 %2, i32 5, i32 2) ; CHECK: %4 = load ptr, ptr %1, align 4 -; CHECK: call void @llvm.lifetime.end.p0(i64 4, ptr %1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %1) ; CHECK: %5 = insertvalue { ptr, i1 } poison, ptr %4, 0 ; CHECK: %6 = insertvalue { ptr, i1 } %5, i1 %3, 1 ; CHECK: %ret = extractvalue { ptr, i1 } %6, 0 @@ -202,10 +202,10 @@ define ptr @test_cmpxchg_ptr(ptr %arg, ptr %old, ptr %new) { ; CHECK-LABEL: @test_store_fp128 ; CHECK: %1 = alloca fp128, align 8 -; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1) +; CHECK: call void @llvm.lifetime.start.p0(ptr %1) ; CHECK: store fp128 %val, ptr %1, align 8 ; CHECK: call void @__atomic_store(i32 16, ptr %arg, ptr %1, i32 5) -; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1) +; CHECK: call void @llvm.lifetime.end.p0(ptr %1) ; CHECK: ret void define void @test_store_fp128(ptr %arg, fp128 %val) { store atomic fp128 %val, ptr %arg seq_cst, align 16 diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll index 20a9e9f..fda296b 100644 --- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll +++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll @@ -5,10 +5,10 @@ define i256 @atomic_load256_libcall(ptr %ptr) nounwind { ; CHECK-LABEL: @atomic_load256_libcall( ; CHECK-NEXT: [[TMP1:%.*]] = alloca i256, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: call void @__atomic_load(i32 32, ptr [[PTR:%.*]], ptr [[TMP1]], i32 0) ; CHECK-NEXT: [[TMP2:%.*]] = load i256, ptr [[TMP1]], align 16 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: ret i256 [[TMP2]] ; %result = load atomic i256, ptr %ptr unordered, align 16 @@ -19,10 +19,10 @@ define i256 @atomic_load256_libcall_as1(ptr addrspace(1) %ptr) nounwind { ; CHECK-LABEL: @atomic_load256_libcall_as1( ; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(1) [[PTR:%.*]] to ptr ; CHECK-NEXT: [[TMP2:%.*]] = alloca i256, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[TMP2]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP2]]) ; CHECK-NEXT: call void @__atomic_load(i32 32, ptr [[TMP1]], ptr [[TMP2]], i32 0) ; CHECK-NEXT: [[TMP3:%.*]] = load i256, ptr [[TMP2]], align 16 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[TMP2]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP2]]) ; CHECK-NEXT: ret i256 [[TMP3]] ; %result = load atomic i256, ptr addrspace(1) %ptr unordered, align 16 diff --git a/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll b/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll index 59916cc..647f187 100644 --- 
a/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll +++ b/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll @@ -361,11 +361,11 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -388,11 +388,11 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -415,11 +415,11 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -442,11 +442,11 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]] ; CHECK-NEXT: 
[[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -469,11 +469,11 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -496,11 +496,11 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -523,11 +523,11 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = 
load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -550,11 +550,11 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -577,11 +577,11 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -604,11 +604,11 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -631,11 +631,11 @@ 
define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -658,11 +658,11 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -685,11 +685,11 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -712,11 +712,11 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void 
@llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -739,11 +739,11 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -766,11 +766,11 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -793,11 +793,11 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr 
[[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -820,11 +820,11 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -847,11 +847,11 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -874,11 +874,11 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1 @@ -1251,11 +1251,11 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ 
[[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1278,11 +1278,11 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1305,11 +1305,11 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1332,11 +1332,11 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], 
align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1359,11 +1359,11 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1386,11 +1386,11 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1413,11 +1413,11 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = 
insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1440,11 +1440,11 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1467,11 +1467,11 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1494,11 +1494,11 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1521,11 +1521,11 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ 
[[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1548,11 +1548,11 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1575,11 +1575,11 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1602,11 +1602,11 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr 
[[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1629,11 +1629,11 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1656,11 +1656,11 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1683,11 +1683,11 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: 
[[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1710,11 +1710,11 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1737,11 +1737,11 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -1764,11 +1764,11 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1 @@ -2112,11 +2112,11 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], 
[[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2139,11 +2139,11 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2166,11 +2166,11 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2193,11 +2193,11 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 
[[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2220,11 +2220,11 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2247,11 +2247,11 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2274,11 +2274,11 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; 
CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2301,11 +2301,11 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2328,11 +2328,11 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2355,11 +2355,11 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2382,11 +2382,11 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ 
[[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2409,11 +2409,11 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2436,11 +2436,11 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2463,11 +2463,11 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store 
i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2490,11 +2490,11 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2517,11 +2517,11 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1 ; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1 @@ -2544,11 +2544,11 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind { ; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]] ; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) 
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2571,11 +2571,11 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2598,11 +2598,11 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2625,11 +2625,11 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
diff --git a/llvm/test/Transforms/Attributor/heap_to_stack.ll b/llvm/test/Transforms/Attributor/heap_to_stack.ll
index 6719290..d54f713 100644
--- a/llvm/test/Transforms/Attributor/heap_to_stack.ll
+++ b/llvm/test/Transforms/Attributor/heap_to_stack.ll
@@ -27,7 +27,7 @@ declare i32 @no_return_call() noreturn

declare void @free(ptr nocapture) allockind("free")

-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind

;.
; CHECK: @G = internal global ptr undef, align 4
diff --git a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
index 0be9434..9a6e0680 100644
--- a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
+++ b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
@@ -32,7 +32,7 @@ declare i32 @no_return_call() noreturn

declare void @free(ptr nocapture)

-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind

;.
; CHECK: @G = internal global ptr undef, align 4
diff --git a/llvm/test/Transforms/Attributor/liveness.ll b/llvm/test/Transforms/Attributor/liveness.ll
index 874eff6..c112d99 100644
--- a/llvm/test/Transforms/Attributor/liveness.ll
+++ b/llvm/test/Transforms/Attributor/liveness.ll
@@ -2589,7 +2589,7 @@ define void @bad_gep() {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[N1:%.*]] = alloca i8, i32 0, align 1
; TUNIT-NEXT: [[M2:%.*]] = alloca i8, i32 0, align 1
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18:[0-9]+]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18:[0-9]+]]
; TUNIT-NEXT: br label [[EXIT:%.*]]
; TUNIT: while.body:
; TUNIT-NEXT: unreachable
@@ -2598,7 +2598,7 @@ define void @bad_gep() {
; TUNIT: if.end:
; TUNIT-NEXT: unreachable
; TUNIT: exit:
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18]]
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
@@ -2607,7 +2607,7 @@ define void @bad_gep() {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[N1:%.*]] = alloca i8, i32 0, align 1
; CGSCC-NEXT: [[M2:%.*]] = alloca i8, i32 0, align 1
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21:[0-9]+]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21:[0-9]+]]
; CGSCC-NEXT: br label [[EXIT:%.*]]
; CGSCC: while.body:
; CGSCC-NEXT: unreachable
@@ -2616,13 +2616,13 @@ define void @bad_gep() {
; CGSCC: if.end:
; CGSCC-NEXT: unreachable
; CGSCC: exit:
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21]]
; CGSCC-NEXT: ret void
;
entry:
%n = alloca i8
%m = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %n)
+ call void @llvm.lifetime.start.p0(ptr %n)
br label %exit

while.body:
@@ -2640,7 +2640,7 @@ if.end:
br i1 %cmp, label %exit, label %while.body

exit:
- call void @llvm.lifetime.end.p0(i64 1, ptr %n)
+ call void @llvm.lifetime.end.p0(ptr %n)
ret void
}

@@ -2679,8 +2679,8 @@ b2:
declare i1 @bad_gep_helper1(ptr, ptr, ptr)
declare void @bad_gep_helper2(i8)

-declare void @llvm.lifetime.start.p0(i64 %0, ptr %1)
-declare void @llvm.lifetime.end.p0(i64 %0, ptr %1)
+declare void @llvm.lifetime.start.p0(ptr %1)
+declare void @llvm.lifetime.end.p0(ptr %1)
;.
; TUNIT: attributes #[[ATTR0]] = { nofree noreturn nosync nounwind }
; TUNIT: attributes #[[ATTR1:[0-9]+]] = { memory(none) }
diff --git a/llvm/test/Transforms/Attributor/noalias.ll b/llvm/test/Transforms/Attributor/noalias.ll
index 46d9f77..b7c295a 100644
--- a/llvm/test/Transforms/Attributor/noalias.ll
+++ b/llvm/test/Transforms/Attributor/noalias.ll
@@ -577,31 +577,31 @@ define internal fastcc double @strtox(ptr %s, ptr %p, i32 %prec) unnamed_addr {
; TUNIT-SAME: (ptr [[S:%.*]]) unnamed_addr {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[F:%.*]] = alloca [[STRUCT__IO_FILE:%.*]], align 8
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR13:[0-9]+]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR13:[0-9]+]]
; TUNIT-NEXT: [[CALL:%.*]] = call i32 @sh_fromstring(ptr noundef nonnull align 8 dereferenceable(240) [[F]], ptr [[S]])
; TUNIT-NEXT: call void @__shlim(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i64 noundef 0)
; TUNIT-NEXT: [[CALL1:%.*]] = call double @__floatscan(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i32 noundef 1, i32 noundef 1)
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
; TUNIT-NEXT: ret double [[CALL1]]
;
; CGSCC-LABEL: define {{[^@]+}}@strtox
; CGSCC-SAME: (ptr [[S:%.*]]) unnamed_addr {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[F:%.*]] = alloca [[STRUCT__IO_FILE:%.*]], align 8
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR14:[0-9]+]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR14:[0-9]+]]
; CGSCC-NEXT: [[CALL:%.*]] = call i32 @sh_fromstring(ptr noundef nonnull align 8 dereferenceable(240) [[F]], ptr [[S]])
; CGSCC-NEXT: call void @__shlim(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i64 noundef 0)
; CGSCC-NEXT: [[CALL1:%.*]] = call double @__floatscan(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i32 noundef 1, i32 noundef 1)
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
; CGSCC-NEXT: ret double [[CALL1]]
;
entry:
%f = alloca %struct._IO_FILE, align 8
- call void @llvm.lifetime.start.p0(i64 144, ptr nonnull %f)
+ call void @llvm.lifetime.start.p0(ptr nonnull %f)
%call = call i32 @sh_fromstring(ptr nonnull %f, ptr %s)
call void @__shlim(ptr nonnull %f, i64 0)
%call1 = call double @__floatscan(ptr nonnull %f, i32 %prec, i32 1)
- call void @llvm.lifetime.end.p0(i64 144, ptr nonnull %f)
+ call void @llvm.lifetime.end.p0(ptr nonnull %f)
ret double %call1
}

@@ -620,7 +620,7 @@ entry:
}

; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)

; Function Attrs: optsize
declare dso_local i32 @sh_fromstring(...) local_unnamed_addr
@@ -632,7 +632,7 @@ declare dso_local void @__shlim(ptr, i64) local_unnamed_addr
declare dso_local double @__floatscan(ptr, i32, i32) local_unnamed_addr

; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)

; Test 15
; propagate noalias to some callsite arguments that there is no possibly reachable capture before it
diff --git a/llvm/test/Transforms/Attributor/openmp_parallel.ll b/llvm/test/Transforms/Attributor/openmp_parallel.ll
index d7b194d..54da16c 100644
--- a/llvm/test/Transforms/Attributor/openmp_parallel.ll
+++ b/llvm/test/Transforms/Attributor/openmp_parallel.ll
@@ -68,13 +68,13 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; TUNIT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
; TUNIT-NEXT: br label [[OMP_PRECOND_THEN:%.*]]
; TUNIT: omp.precond.then:
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
; TUNIT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
; TUNIT-NEXT: store i32 197, ptr [[DOTOMP_UB]], align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
; TUNIT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
; TUNIT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
; TUNIT-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
; TUNIT-NEXT: call void @__kmpc_for_static_init_4(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP0]], i32 noundef 34, ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_IS_LAST]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_LB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_UB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_STRIDE]], i32 noundef 1, i32 noundef 1)
@@ -103,10 +103,10 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; TUNIT-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY]], label [[OMP_LOOP_EXIT]]
; TUNIT: omp.loop.exit:
; TUNIT-NEXT: call void @__kmpc_for_static_fini(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP0]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
; TUNIT-NEXT: br label [[OMP_PRECOND_END:%.*]]
; TUNIT: omp.precond.end:
; TUNIT-NEXT: ret void
@@ -124,13 +124,13 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; CGSCC-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], 1
; CGSCC-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
; CGSCC: omp.precond.then:
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
; CGSCC-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
; CGSCC-NEXT: store i32 [[SUB2]], ptr [[DOTOMP_UB]], align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
; CGSCC-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
; CGSCC-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
; CGSCC-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
; CGSCC-NEXT: call void @__kmpc_for_static_init_4(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP1]], i32 noundef 34, ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_IS_LAST]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_LB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_UB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_STRIDE]], i32 noundef 1, i32 noundef 1)
@@ -159,10 +159,10 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; CGSCC-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY]], label [[OMP_LOOP_EXIT]]
; CGSCC: omp.loop.exit:
; CGSCC-NEXT: call void @__kmpc_for_static_fini(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP1]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
; CGSCC-NEXT: br label [[OMP_PRECOND_END]]
; CGSCC: omp.precond.end:
; CGSCC-NEXT: ret void
@@ -178,13 +178,13 @@ entry:
br i1 %cmp, label %omp.precond.then, label %omp.precond.end

omp.precond.then: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.lb) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.lb) #3
store i32 0, ptr %.omp.lb, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.ub) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.ub) #3
store i32 %sub2, ptr %.omp.ub, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.stride) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.stride) #3
store i32 1, ptr %.omp.stride, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.is_last) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.is_last) #3
store i32 0, ptr %.omp.is_last, align 4
%1 = load i32, ptr %.global_tid., align 4
call void @__kmpc_for_static_init_4(ptr nonnull @1, i32 %1, i32 34, ptr nonnull %.omp.is_last, ptr nonnull %.omp.lb, ptr nonnull %.omp.ub, ptr nonnull %.omp.stride, i32 1, i32 1) #3
@@ -216,10 +216,10 @@ omp.inner.for.body: ; preds = %omp.inner.for.body,

omp.loop.exit: ; preds = %omp.inner.for.body, %omp.precond.then
call void @__kmpc_for_static_fini(ptr nonnull @1, i32 %1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.is_last) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.stride) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.ub) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.lb) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.is_last) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.stride) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.ub) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.lb) #3
br label %omp.precond.end

omp.precond.end: ; preds = %omp.loop.exit, %entry
@@ -227,10 +227,10 @@ omp.precond.end: ; preds = %omp.loop.exit, %ent
}

; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2

; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg,
ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 declare dso_local void @__kmpc_for_static_init_4(ptr, i32, i32, ptr, ptr, ptr, ptr, i32, i32) local_unnamed_addr diff --git a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll index 472ed30..eb7d78f 100644 --- a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll +++ b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll @@ -201,7 +201,7 @@ declare dso_local void @_ZNSt6vectorIN12_GLOBAL__N_18TestCaseESaIS1_EED2Ev(ptr) declare dso_local void @_Z11BM_functionRN9benchmark5StateE(ptr dereferenceable(144)) #0 ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5 +declare void @llvm.lifetime.start.p0(ptr nocapture) #5 ; Function Attrs: alwaysinline uwtable declare dso_local { i64, ptr } @_ZN9benchmark5State5beginEv(ptr) #6 align 2 @@ -216,7 +216,7 @@ declare dso_local zeroext i1 @_ZNK9benchmark5State13StateIteratorneERKS1_(ptr, p declare dso_local void @_ZNK9benchmark5State13StateIteratordeEv(ptr) #7 align 2 ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5 +declare void @llvm.lifetime.end.p0(ptr nocapture) #5 ; Function Attrs: alwaysinline nounwind uwtable declare dso_local dereferenceable(16) ptr @_ZN9benchmark5State13StateIteratorppEv(ptr) #7 align 2 diff --git a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll index fa942c9..82bed0f 100644 --- a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll +++ b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll @@ -116,7 +116,7 @@ define void @local_alloca_simplifiable_1(ptr noalias sret(%struct.S) align 4 %ag ; TUNIT-SAME: (ptr noalias nofree writeonly sret([[STRUCT_S:%.*]]) align 4 captures(none) dereferenceable_or_null(24) [[AGG_RESULT:%.*]]) #[[ATTR1:[0-9]+]] { ; TUNIT-NEXT: entry: ; TUNIT-NEXT: [[S:%.*]] = alloca [[STRUCT_S]], align 4 -; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17:[0-9]+]] +; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17:[0-9]+]] ; TUNIT-NEXT: [[F1:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 3 ; TUNIT-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 4 ; TUNIT-NEXT: [[F3:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 5 @@ -136,7 +136,7 @@ define void @local_alloca_simplifiable_1(ptr noalias sret(%struct.S) align 4 %ag ; TUNIT-NEXT: store i32 4, ptr [[I212]], align 4, !tbaa [[TBAA13:![0-9]+]] ; TUNIT-NEXT: [[I316:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[AGG_RESULT]], i64 0, i32 2 ; TUNIT-NEXT: store i32 4, ptr [[I316]], align 4, !tbaa [[TBAA14:![0-9]+]] -; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17]] +; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17]] ; TUNIT-NEXT: ret void ; ; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(argmem: readwrite) @@ -144,7 +144,7 @@ define void @local_alloca_simplifiable_1(ptr 
noalias sret(%struct.S) align 4 %ag ; CGSCC-SAME: (ptr noalias nofree noundef nonnull writeonly sret([[STRUCT_S:%.*]]) align 4 captures(none) dereferenceable(24) [[AGG_RESULT:%.*]]) #[[ATTR1:[0-9]+]] { ; CGSCC-NEXT: entry: ; CGSCC-NEXT: [[S:%.*]] = alloca [[STRUCT_S]], align 4 -; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20:[0-9]+]] +; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20:[0-9]+]] ; CGSCC-NEXT: [[F1:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 3 ; CGSCC-NEXT: store float 0x3FF19999A0000000, ptr [[F1]], align 4, !tbaa [[TBAA7:![0-9]+]] ; CGSCC-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 4 @@ -185,12 +185,12 @@ define void @local_alloca_simplifiable_1(ptr noalias sret(%struct.S) align 4 %ag ; CGSCC-NEXT: [[ADD15:%.*]] = add nsw i32 [[I10]], [[I11]] ; CGSCC-NEXT: [[I316:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[AGG_RESULT]], i64 0, i32 2 ; CGSCC-NEXT: store i32 [[ADD15]], ptr [[I316]], align 4, !tbaa [[TBAA14]] -; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20]] +; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20]] ; CGSCC-NEXT: ret void ; entry: %s = alloca %struct.S, align 4 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %s) + call void @llvm.lifetime.start.p0(ptr nonnull %s) %f1 = getelementptr inbounds %struct.S, ptr %s, i64 0, i32 3 store float 0x3FF19999A0000000, ptr %f1, align 4, !tbaa !7 %f2 = getelementptr inbounds %struct.S, ptr %s, i64 0, i32 4 @@ -231,13 +231,13 @@ entry: %add15 = add nsw i32 %i10, %i11 %i316 = getelementptr inbounds %struct.S, ptr %agg.result, i64 0, i32 2 store i32 %add15, ptr %i316, align 4, !tbaa !14 - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %s) + call void @llvm.lifetime.end.p0(ptr nonnull %s) ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) ; void local_alloca_simplifiable_2(void) { ; char Bytes[1024]; @@ -260,7 +260,7 @@ define void @local_alloca_simplifiable_2() { ; TUNIT-SAME: () #[[ATTR3:[0-9]+]] { ; TUNIT-NEXT: entry: ; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16 -; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]] +; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]] ; TUNIT-NEXT: br label [[FOR_COND:%.*]] ; TUNIT: for.cond: ; TUNIT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ] @@ -326,7 +326,7 @@ define void @local_alloca_simplifiable_2() { ; TUNIT-NEXT: [[INDVARS_IV_NEXT13]] = add nuw nsw i64 [[INDVARS_IV12]], 1 ; TUNIT-NEXT: br label [[FOR_COND28]], !llvm.loop [[LOOP20:![0-9]+]] ; TUNIT: for.end38: -; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]] +; TUNIT-NEXT: call void 
@llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]] ; TUNIT-NEXT: ret void ; ; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn @@ -334,7 +334,7 @@ define void @local_alloca_simplifiable_2() { ; CGSCC-SAME: () #[[ATTR3:[0-9]+]] { ; CGSCC-NEXT: entry: ; CGSCC-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16 -; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]] +; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]] ; CGSCC-NEXT: br label [[FOR_COND:%.*]] ; CGSCC: for.cond: ; CGSCC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ] @@ -406,12 +406,12 @@ define void @local_alloca_simplifiable_2() { ; CGSCC-NEXT: [[INDVARS_IV_NEXT13]] = add nuw nsw i64 [[INDVARS_IV12]], 1 ; CGSCC-NEXT: br label [[FOR_COND28]], !llvm.loop [[LOOP23:![0-9]+]] ; CGSCC: for.end38: -; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]] +; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]] ; CGSCC-NEXT: ret void ; entry: %Bytes = alloca [1024 x i8], align 16 - call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %Bytes) + call void @llvm.lifetime.start.p0(ptr nonnull %Bytes) br label %for.cond for.cond: ; preds = %for.inc, %entry @@ -503,7 +503,7 @@ for.inc36: ; preds = %for.body31 br label %for.cond28, !llvm.loop !23 for.end38: ; preds = %for.cond.cleanup30 - call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %Bytes) + call void @llvm.lifetime.end.p0(ptr nonnull %Bytes) ret void } @@ -558,7 +558,7 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) { ; TUNIT-SAME: (i32 [[CND:%.*]]) #[[ATTR3]] { ; TUNIT-NEXT: entry: ; TUNIT-NEXT: [[L:%.*]] = alloca i32, align 4 -; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]] +; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]] ; TUNIT-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0 ; TUNIT-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]] ; TUNIT: cond.true: @@ -566,7 +566,7 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) { ; TUNIT: cond.false: ; TUNIT-NEXT: br label [[COND_END]] ; TUNIT: cond.end: -; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]] +; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]] ; TUNIT-NEXT: ret i32 5 ; ; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn @@ -574,7 +574,7 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) { ; CGSCC-SAME: (i32 [[CND:%.*]]) #[[ATTR5:[0-9]+]] { ; CGSCC-NEXT: entry: ; CGSCC-NEXT: [[L:%.*]] = alloca i32, align 4 -; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]] +; CGSCC-NEXT: call void 
@llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]] ; CGSCC-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0 ; CGSCC-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]] ; CGSCC: cond.true: @@ -582,12 +582,12 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) { ; CGSCC: cond.false: ; CGSCC-NEXT: br label [[COND_END]] ; CGSCC: cond.end: -; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]] +; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]] ; CGSCC-NEXT: ret i32 5 ; entry: %L = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %L) + call void @llvm.lifetime.start.p0(ptr nonnull %L) store i32 5, ptr @GI1, align 4, !tbaa !3 store i32 5, ptr %L, align 4, !tbaa !3 %tobool.not = icmp eq i32 %cnd, 0 @@ -602,7 +602,7 @@ cond.false: ; preds = %entry cond.end: ; preds = %cond.false, %cond.true %cond = phi ptr [ @GI1, %cond.true ], [ %L, %cond.false ] %i1 = load i32, ptr %cond, align 4, !tbaa !3 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %L) + call void @llvm.lifetime.end.p0(ptr nonnull %L) ret i32 %i1 } @@ -620,7 +620,7 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) { ; TUNIT-SAME: (i32 [[CND:%.*]]) #[[ATTR3]] { ; TUNIT-NEXT: entry: ; TUNIT-NEXT: [[L:%.*]] = alloca i32, align 4 -; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]] +; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]] ; TUNIT-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0 ; TUNIT-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]] ; TUNIT: cond.true: @@ -628,7 +628,7 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) { ; TUNIT: cond.false: ; TUNIT-NEXT: br label [[COND_END]] ; TUNIT: cond.end: -; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]] +; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]] ; TUNIT-NEXT: ret i32 5 ; ; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn @@ -636,7 +636,7 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) { ; CGSCC-SAME: (i32 [[CND:%.*]]) #[[ATTR5]] { ; CGSCC-NEXT: entry: ; CGSCC-NEXT: [[L:%.*]] = alloca i32, align 4 -; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]] +; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]] ; CGSCC-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0 ; CGSCC-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]] ; CGSCC: cond.true: @@ -644,12 +644,12 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) { ; CGSCC: cond.false: ; CGSCC-NEXT: br label [[COND_END]] ; CGSCC: cond.end: -; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]] +; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias 
nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]] ; CGSCC-NEXT: ret i32 5 ; entry: %L = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %L) + call void @llvm.lifetime.start.p0(ptr nonnull %L) %tobool.not = icmp eq i32 %cnd, 0 br i1 %tobool.not, label %cond.false, label %cond.true @@ -663,7 +663,7 @@ cond.end: ; preds = %cond.false, %cond.t %cond = phi ptr [ @GI2, %cond.true ], [ %L, %cond.false ] store i32 5, ptr %cond, align 4, !tbaa !3 %l = load i32, ptr %cond, align 4, !tbaa !3 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %L) + call void @llvm.lifetime.end.p0(ptr nonnull %L) ret i32 %l } @@ -1528,8 +1528,8 @@ define i32 @local_alloca_not_simplifiable_1() { ; TUNIT-NEXT: entry: ; TUNIT-NEXT: [[X:%.*]] = alloca i32, align 4 ; TUNIT-NEXT: [[Y:%.*]] = alloca i32, align 4 -; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR17]] -; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR17]] +; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR17]] +; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR17]] ; TUNIT-NEXT: store i32 1, ptr [[Y]], align 4, !tbaa [[TBAA3]] ; TUNIT-NEXT: store i32 1, ptr [[X]], align 4, !tbaa [[TBAA3]] ; TUNIT-NEXT: call void @escape(ptr noundef nonnull align 4 dereferenceable(4) [[X]]) @@ -1540,16 +1540,16 @@ define i32 @local_alloca_not_simplifiable_1() { ; TUNIT-NEXT: [[I4:%.*]] = load i32, ptr [[Y]], align 4, !tbaa [[TBAA3]] ; TUNIT-NEXT: [[ADD:%.*]] = add nsw i32 [[I3]], [[I4]] ; TUNIT-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[COND]] -; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) -; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) +; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) +; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) ; TUNIT-NEXT: ret i32 [[ADD1]] ; ; CGSCC-LABEL: define {{[^@]+}}@local_alloca_not_simplifiable_1() { ; CGSCC-NEXT: entry: ; CGSCC-NEXT: [[X:%.*]] = alloca i32, align 4 ; CGSCC-NEXT: [[Y:%.*]] = alloca i32, align 4 -; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR20]] -; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR20]] +; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR20]] +; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR20]] ; CGSCC-NEXT: store i32 1, ptr [[Y]], align 4, !tbaa [[TBAA3]] ; CGSCC-NEXT: store i32 1, ptr [[X]], align 4, !tbaa [[TBAA3]] ; CGSCC-NEXT: call void @escape(ptr noundef nonnull align 4 dereferenceable(4) [[X]]) @@ -1560,15 +1560,15 @@ define i32 @local_alloca_not_simplifiable_1() { ; 
CGSCC-NEXT: [[I4:%.*]] = load i32, ptr [[Y]], align 4, !tbaa [[TBAA3]] ; CGSCC-NEXT: [[ADD:%.*]] = add nsw i32 [[I3]], [[I4]] ; CGSCC-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[COND]] -; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) -; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) +; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) +; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) ; CGSCC-NEXT: ret i32 [[ADD1]] ; entry: %X = alloca i32, align 4 %Y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %X) - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %Y) + call void @llvm.lifetime.start.p0(ptr nonnull %X) + call void @llvm.lifetime.start.p0(ptr nonnull %Y) store i32 1, ptr %Y, align 4, !tbaa !3 store i32 1, ptr %X, align 4, !tbaa !3 call void @escape(ptr nonnull %X) @@ -1579,8 +1579,8 @@ entry: %i4 = load i32, ptr %Y, align 4, !tbaa !3 %add = add nsw i32 %i3, %i4 %add1 = add nsw i32 %add, %cond - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %Y) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %X) + call void @llvm.lifetime.end.p0(ptr nonnull %Y) + call void @llvm.lifetime.end.p0(ptr nonnull %X) ret i32 %add1 } @@ -2755,7 +2755,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3 ; TUNIT-SAME: (ptr nofree readonly captures(none) [[IN:%.*]], ptr nofree writeonly captures(none) [[OUT:%.*]], i32 [[IDX:%.*]]) #[[ATTR1]] { ; TUNIT-NEXT: entry: ; TUNIT-NEXT: [[BUF:%.*]] = alloca [128 x i32], align 16 -; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]] +; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]] ; TUNIT-NEXT: br label [[FOR_COND:%.*]] ; TUNIT: for.cond: ; TUNIT-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ] @@ -2776,7 +2776,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3 ; TUNIT-NEXT: [[CMP5:%.*]] = icmp slt i32 [[I3_0]], 128 ; TUNIT-NEXT: br i1 [[CMP5]], label [[FOR_BODY7]], label [[FOR_COND_CLEANUP6:%.*]] ; TUNIT: for.cond.cleanup6: -; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]] +; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]] ; TUNIT-NEXT: ret void ; TUNIT: for.body7: ; TUNIT-NEXT: [[IDXPROM8:%.*]] = sext i32 [[I3_0]] to i64 @@ -2797,7 +2797,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3 ; CGSCC-SAME: (ptr nofree readonly captures(none) [[IN:%.*]], ptr nofree writeonly captures(none) [[OUT:%.*]], i32 [[IDX:%.*]]) #[[ATTR13:[0-9]+]] { ; CGSCC-NEXT: entry: ; CGSCC-NEXT: [[BUF:%.*]] = alloca [128 x i32], align 16 -; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]] +; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef 
nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]] ; CGSCC-NEXT: br label [[FOR_COND:%.*]] ; CGSCC: for.cond: ; CGSCC-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ] @@ -2818,7 +2818,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3 ; CGSCC-NEXT: [[CMP5:%.*]] = icmp slt i32 [[I3_0]], 128 ; CGSCC-NEXT: br i1 [[CMP5]], label [[FOR_BODY7]], label [[FOR_COND_CLEANUP6:%.*]] ; CGSCC: for.cond.cleanup6: -; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]] +; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]] ; CGSCC-NEXT: ret void ; CGSCC: for.body7: ; CGSCC-NEXT: [[IDXPROM8:%.*]] = sext i32 [[I3_0]] to i64 @@ -2836,7 +2836,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3 ; entry: %buf = alloca [128 x i32], align 16 - call void @llvm.lifetime.start.p0(i64 512, ptr %buf) #2 + call void @llvm.lifetime.start.p0(ptr %buf) #2 br label %for.cond for.cond: ; preds = %for.body, %entry @@ -2862,7 +2862,7 @@ for.cond4: ; preds = %for.body7, %for.con br i1 %cmp5, label %for.body7, label %for.cond.cleanup6 for.cond.cleanup6: ; preds = %for.cond4 - call void @llvm.lifetime.end.p0(i64 512, ptr %buf) #2 + call void @llvm.lifetime.end.p0(ptr %buf) #2 ret void for.body7: ; preds = %for.cond4 diff --git a/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll b/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll index b932a7d..09abf1f 100644 --- a/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll +++ b/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll @@ -30,16 +30,16 @@ entry: br label %loop1 loop1: - call void @llvm.lifetime.start.p0(i64 4, ptr %v1) + call void @llvm.lifetime.start.p0(ptr %v1) %r1 = call i32 @foo(ptr %v1) - call void @llvm.lifetime.end.p0(i64 4, ptr %v1) + call void @llvm.lifetime.end.p0(ptr %v1) %cmp1 = icmp ne i32 %r1, 0 br i1 %cmp1, label %loop1, label %loop2 loop2: - call void @llvm.lifetime.start.p0(i64 4, ptr %v2) + call void @llvm.lifetime.start.p0(ptr %v2) %r2 = call i32 @foo(ptr %v2) - call void @llvm.lifetime.end.p0(i64 4, ptr %v2) + call void @llvm.lifetime.end.p0(ptr %v2) %cmp2 = icmp ne i32 %r2, 0 br i1 %cmp2, label %loop2, label %exit @@ -49,6 +49,6 @@ exit: declare i32 @foo(ptr) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll index 9cdc37a..a24bb74 100644 --- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll +++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll @@ -20,11 +20,11 @@ bb: br i1 %tmp4, label %bb6, label %bb5 bb5: ; preds = %bb - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2 + call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2 store i32 %tmp3, ptr %tmp, align 4, !tbaa !2 store i32 %tmp3, ptr @g, align 4, !tbaa !2 call void @bar(ptr nonnull %tmp) #2 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2 + call void @llvm.lifetime.end.p0(ptr 
nonnull %tmp) #2 br label %bb6 bb6: ; preds = %bb5, %bb @@ -32,14 +32,14 @@ bb6: ; preds = %bb5, %bb ret i32 %tmp7 } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @bar(ptr) local_unnamed_addr #2 declare void @bar2(ptr, ptr) local_unnamed_addr #1 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 ; Function Attrs: nounwind uwtable define i32 @caller(i32 %arg) local_unnamed_addr #0 { diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll index f4a37e7..22c0baf 100644 --- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll +++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll @@ -18,10 +18,10 @@ bb: br i1 %tmp4, label %bb6, label %bb5 bb5: ; preds = %bb - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2 + call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2 store i32 %tmp3, ptr @g, align 4, !tbaa !2 call void @bar(ptr nonnull %tmp) #2 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2 + call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #2 br label %bb6 bb6: ; preds = %bb5, %bb @@ -30,14 +30,14 @@ bb6: ; preds = %bb5, %bb } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @bar(ptr) local_unnamed_addr #2 declare void @bar2(ptr, ptr) local_unnamed_addr #1 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 ; Function Attrs: nounwind uwtable define i32 @caller(i32 %arg) local_unnamed_addr #0 { diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll index bd51910..8b9c5dd 100644 --- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll +++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll @@ -7,7 +7,7 @@ @g = external local_unnamed_addr global i32, align 4 ; CHECK-LABEL: define{{.*}}@caller( -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %tmp.i) +; CHECK: call void @llvm.lifetime.start.p0(ptr %tmp.i) ; CHECK-NEXT: call void @callee_unknown_use1.{{.*}}(ptr %tmp.i define i32 @callee_unknown_use1(i32 %arg) local_unnamed_addr #0 { @@ -21,11 +21,11 @@ bb: br i1 %tmp4, label %bb6, label %bb5 bb5: ; preds = %bb - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2 + call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2 store i32 %tmp3, ptr @g, align 4, !tbaa !2 %tmp11 = bitcast ptr %tmp to ptr call void @bar(ptr nonnull %tmp11) #2 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2 + call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #2 br label %bb6 bb6: ; preds = %bb5, %bb @@ -36,14 +36,14 @@ bb6: ; preds = %bb5, %bb ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @bar(ptr) local_unnamed_addr #2 declare void @bar2(ptr, ptr) local_unnamed_addr #1 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 ; Function Attrs: nounwind uwtable define i32 @caller(i32 %arg) local_unnamed_addr #0 { diff --git 
a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll index 54782c5..10be1c8 100644 --- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll +++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll @@ -18,11 +18,11 @@ bb: br i1 %tmp4, label %bb6, label %bb5 bb5: ; preds = %bb - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2 + call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2 store i32 %tmp3, ptr %tmp, align 4, !tbaa !2 store i32 %tmp3, ptr @g, align 4, !tbaa !2 call void @bar(ptr nonnull %tmp) #2 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2 + call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #2 br label %bb6 bb6: ; preds = %bb5, %bb @@ -32,14 +32,14 @@ bb6: ; preds = %bb5, %bb } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @bar(ptr) local_unnamed_addr #2 declare void @bar2(ptr, ptr) local_unnamed_addr #1 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 ; Function Attrs: nounwind uwtable define i32 @caller(i32 %arg) local_unnamed_addr #0 { diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll index bdf9e23..5e0ce20 100644 --- a/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll +++ b/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll @@ -26,10 +26,10 @@ bb5: ; preds = %bb4, %bb1, %bb ; CHECK-LABEL: bb: ; CHECK-NEXT: [[CALL26LOC:%.*]] = alloca ptr ; CHECK-LABEL: codeRepl.i: -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[CALL26LOC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[CALL26LOC]]) ; CHECK-NEXT: call void @bar.1.bb1(ptr [[CALL26LOC]]) ; CHECK-NEXT: %call26.reload.i = load ptr, ptr [[CALL26LOC]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[CALL26LOC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[CALL26LOC]]) define ptr @dummy_caller(i32 %arg) { bb: %tmp = tail call ptr @bar(i32 %arg) diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink.ll b/llvm/test/Transforms/CodeExtractor/live_shrink.ll index f5debc503..43cc248 100644 --- a/llvm/test/Transforms/CodeExtractor/live_shrink.ll +++ b/llvm/test/Transforms/CodeExtractor/live_shrink.ll @@ -8,7 +8,7 @@ define void @_Z3foov() local_unnamed_addr { bb: %tmp = alloca %class.A, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) + call void @llvm.lifetime.start.p0(ptr nonnull %tmp) %tmp2 = load i32, ptr @cond, align 4, !tbaa !2 %tmp3 = icmp eq i32 %tmp2, 0 br i1 %tmp3, label %bb4, label %bb5 @@ -18,17 +18,17 @@ bb4: ; preds = %bb br label %bb5 bb5: ; preds = %bb4, %bb - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) + call void @llvm.lifetime.end.p0(ptr nonnull %tmp) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) ; Function Attrs: uwtable define void @_Z3goov() local_unnamed_addr { @@ -49,8 +49,8 @@ bb: ; CHECK-LABEL: define internal void @_Z3foov.1. 
; CHECK: newFuncRoot: ; CHECK-NEXT: %tmp = alloca %class.A -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) -; CHECK: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %tmp) +; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %tmp) ; CHECK-NEXT: br label %bb5.exitStub diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll index e9d5fb6..ef815ad 100644 --- a/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll +++ b/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll @@ -9,7 +9,7 @@ define void @_Z3foov() local_unnamed_addr { bb: %tmp = alloca %class.A, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %tmp) + call void @llvm.lifetime.start.p0(ptr nonnull %tmp) %tmp2 = load i32, ptr @cond, align 4, !tbaa !2 %tmp3 = icmp eq i32 %tmp2, 0 br i1 %tmp3, label %bb4, label %bb5 @@ -19,17 +19,17 @@ bb4: ; preds = %bb br label %bb5 bb5: ; preds = %bb4, %bb - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %tmp) + call void @llvm.lifetime.end.p0(ptr nonnull %tmp) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) ; Function Attrs: uwtable define void @_Z3goov() local_unnamed_addr { diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll index 6f63bca..7074854 100644 --- a/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll +++ b/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll @@ -9,7 +9,7 @@ define void @_Z3foov() local_unnamed_addr { bb: %tmp = alloca %class.A, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) + call void @llvm.lifetime.start.p0(ptr nonnull %tmp) %tmp2 = load i32, ptr @cond, align 4, !tbaa !2 %tmp3 = icmp eq i32 %tmp2, 0 br i1 %tmp3, label %bb4, label %bb9 @@ -29,17 +29,17 @@ bb8: ; preds = %bb4 br label %bb9 bb9: ; preds = %bb8, %bb4, %bb - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) + call void @llvm.lifetime.end.p0(ptr nonnull %tmp) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) ; Function Attrs: uwtable define void @_Z3goov() local_unnamed_addr { @@ -50,7 +50,7 @@ bb: ; CHECK-LABEL: define internal void @_Z3foov.1. 
; CHECK: bb9: -; CHECK: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) +; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %tmp) ; CHECK: br label %.exitStub diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll index 2512ac9..1d0af23 100644 --- a/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll +++ b/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll @@ -8,8 +8,8 @@ define void @_Z3foov() local_unnamed_addr { bb: %tmp = alloca %class.A, align 4 %tmp1 = alloca %class.A, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp1) + call void @llvm.lifetime.start.p0(ptr nonnull %tmp) + call void @llvm.lifetime.start.p0(ptr nonnull %tmp1) %tmp4 = load i32, ptr @cond, align 4, !tbaa !2 %tmp5 = icmp eq i32 %tmp4, 0 br i1 %tmp5, label %bb6, label %bb7 @@ -19,18 +19,18 @@ bb6: ; preds = %bb br label %bb7 bb7: ; preds = %bb6, %bb - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) + call void @llvm.lifetime.end.p0(ptr nonnull %tmp1) + call void @llvm.lifetime.end.p0(ptr nonnull %tmp) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) ; Function Attrs: uwtable define void @_Z3goov() local_unnamed_addr { diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll index 7942418..c5bd626 100644 --- a/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll +++ b/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll @@ -14,8 +14,8 @@ define void @_Z3foo_unknown_mem_accessv() local_unnamed_addr { bb: %tmp = alloca %class.A, align 4 %tmp1 = alloca %class.A, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp1) + call void @llvm.lifetime.start.p0(ptr nonnull %tmp) + call void @llvm.lifetime.start.p0(ptr nonnull %tmp1) %tmp4 = load ptr, ptr @condptr, align 8, !tbaa !2 %tmp5 = load i32, ptr %tmp4, align 4, !tbaa !6 %tmp6 = icmp eq i32 %tmp5, 0 @@ -26,20 +26,20 @@ bb7: ; preds = %bb br label %bb8 bb8: ; preds = %bb7, %bb - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) + call void @llvm.lifetime.end.p0(ptr nonnull %tmp1) + call void @llvm.lifetime.end.p0(ptr nonnull %tmp) ret void } declare void @_Z3barv() local_unnamed_addr -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define void @_Z3foo_unknown_calli(i32 %arg) local_unnamed_addr { bb: %tmp = alloca %class.A, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) + call void @llvm.lifetime.start.p0(ptr nonnull %tmp) tail call void @_Z3barv() %tmp2 = icmp eq i32 %arg, 0 br i1 %tmp2, label %bb3, label %bb4 @@ -49,7 +49,7 @@ bb3: ; preds = %bb br label %bb4 bb4: ; preds = %bb3, %bb - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) + call void 
@llvm.lifetime.end.p0(ptr nonnull %tmp) ret void } diff --git a/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll b/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll index 3f113e6..d2b79ab 100644 --- a/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll +++ b/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll @@ -4,8 +4,8 @@ target triple = "armv8m.main-none-eabi" declare ptr @f0() declare ptr @f1() -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind define ptr @tail_dup() { ; CHECK-LABEL: tail_dup @@ -15,7 +15,7 @@ define ptr @tail_dup() { ; CHECK-NEXT: ret ptr bb0: %a = alloca i32 - call void @llvm.lifetime.start.p0(i64 -1, ptr %a) nounwind + call void @llvm.lifetime.start.p0(ptr %a) nounwind %tmp0 = tail call ptr @f0() br label %return bb1: @@ -23,7 +23,7 @@ bb1: br label %return return: %retval = phi ptr [ %tmp0, %bb0 ], [ %tmp1, %bb1 ] - call void @llvm.lifetime.end.p0(i64 -1, ptr %a) nounwind + call void @llvm.lifetime.end.p0(ptr %a) nounwind ret ptr %retval } diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll b/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll index dd47d5e..f72756d 100644 --- a/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll +++ b/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll @@ -14,7 +14,7 @@ define ptr @foo(i64 %size, i64 %v1, i64 %v2) { entry: %a = alloca i8 - call void @llvm.lifetime.start.p0(i64 -1, ptr %a) nounwind + call void @llvm.lifetime.start.p0(ptr %a) nounwind %cmp1 = icmp ult i64 %size, 1025 br i1 %cmp1, label %if.end, label %case1 @@ -42,12 +42,12 @@ exit1: exit2: %retval2 = phi ptr [ %ret1, %case1 ], [ %retval1, %exit1 ] - call void @llvm.lifetime.end.p0(i64 -1, ptr %a) nounwind + call void @llvm.lifetime.end.p0(ptr %a) nounwind ret ptr %retval2 } declare void @llvm.assume(i1) declare ptr @qux() declare ptr @bar() -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind diff --git a/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll b/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll index 6bf268b..6997c9f 100644 --- a/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll +++ b/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll @@ -65,25 +65,25 @@ define void @test_free_intrinsics(i64 %x) { ; CHECK-LABEL: @test_free_intrinsics( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 100000000032, ptr [[PTR]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 100000000064, ptr [[PTR]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 100000000128, ptr [[PTR]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTR]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTR]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]]) ; CHECK-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 100000000256, ptr [[PTR]]) ; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[I]], i64 100000000256, ptr [[PTR]]) ; CHECK-NEXT: ret void ; entry: %ptr = alloca i8 - call void @llvm.lifetime.start.p0(i64 100000000032, 
ptr %ptr) - call void @llvm.lifetime.start.p0(i64 100000000064, ptr %ptr) - call void @llvm.lifetime.end.p0(i64 100000000128, ptr %ptr) + call void @llvm.lifetime.start.p0(ptr %ptr) + call void @llvm.lifetime.start.p0(ptr %ptr) + call void @llvm.lifetime.end.p0(ptr %ptr) %i = call ptr @llvm.invariant.start.p0(i64 100000000256, ptr %ptr) call void @llvm.invariant.end.p0(ptr %i, i64 100000000256, ptr %ptr) ret void } -declare void @llvm.lifetime.start.p0(i64, ptr) -declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.start.p0(ptr) +declare void @llvm.lifetime.end.p0(ptr) declare ptr @llvm.invariant.start.p0(i64, ptr nocapture) declare void @llvm.invariant.end.p0(ptr, i64, ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-06.ll b/llvm/test/Transforms/Coroutines/coro-alloca-06.ll index 89149ce..bf75196 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-06.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-06.ll @@ -17,11 +17,11 @@ entry: tricky: %2 = call ptr @await_suspend() store ptr %2, ptr %0, align 8 - call void @llvm.lifetime.start.p0(i64 8, ptr %1) + call void @llvm.lifetime.start.p0(ptr %1) store ptr %0, ptr %1, align 8 %3 = load ptr, ptr %1, align 8 %4 = load ptr, ptr %3, align 8 - call void @llvm.lifetime.end.p0(i64 8, ptr %1) + call void @llvm.lifetime.end.p0(ptr %1) br label %finish finish: @@ -49,9 +49,9 @@ suspend: ; CHECK: [[TMP2:%.*]] = call ptr @await_suspend() ; CHECK-NEXT: store ptr [[TMP2]], ptr [[TMP0]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]]) ; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP1]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]]) ; declare ptr @llvm.coro.free(token, ptr) @@ -65,8 +65,8 @@ declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) declare i1 @llvm.coro.end(ptr, i1, token) -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare ptr @await_suspend() declare void @print(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-07.ll b/llvm/test/Transforms/Coroutines/coro-alloca-07.ll index 3b0acdd..8bfb8cf 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-07.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-07.ll @@ -13,11 +13,11 @@ entry: br i1 %n, label %flag_true, label %flag_false flag_true: - call void @llvm.lifetime.start.p0(i64 8, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) br label %merge flag_false: - call void @llvm.lifetime.start.p0(i64 8, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) br label %merge merge: @@ -51,7 +51,7 @@ declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) declare i1 @llvm.coro.end(ptr, i1, token) -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @print(ptr) declare noalias ptr @malloc(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-08.ll b/llvm/test/Transforms/Coroutines/coro-alloca-08.ll index 5a14a0e..80be62a 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-08.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-08.ll @@ -18,9 +18,9 @@ entry: %alloc = call ptr @malloc(i64 16) #3 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr 
%alloc) - call void @llvm.lifetime.start.p0(i64 100, ptr %testval) + call void @llvm.lifetime.start.p0(ptr %testval) call void @consume.i8.array(ptr %testval) - call void @llvm.lifetime.end.p0(i64 100, ptr %testval) + call void @llvm.lifetime.end.p0(ptr %testval) %save = call token @llvm.coro.save(ptr null) %suspend = call i8 @llvm.coro.suspend(token %save, i1 false) @@ -53,9 +53,9 @@ entry: await.ready: %StrayCoroSave = call token @llvm.coro.save(ptr null) - call void @llvm.lifetime.start.p0(i64 100, ptr %testval) + call void @llvm.lifetime.start.p0(ptr %testval) call void @consume.i8.array(ptr %testval) - call void @llvm.lifetime.end.p0(i64 100, ptr %testval) + call void @llvm.lifetime.end.p0(ptr %testval) br label %exit exit: @@ -76,5 +76,5 @@ declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 declare i1 @llvm.coro.end(ptr, i1, token) #3 -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4 -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4 +declare void @llvm.lifetime.start.p0(ptr nocapture) #4 +declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-09.ll b/llvm/test/Transforms/Coroutines/coro-alloca-09.ll deleted file mode 100644 index 5c60c5b..0000000 --- a/llvm/test/Transforms/Coroutines/coro-alloca-09.ll +++ /dev/null @@ -1,57 +0,0 @@ -; RUN: opt < %s -passes='cgscc(coro-split),simplifycfg,early-cse' -S | FileCheck %s - -%"struct.std::coroutine_handle" = type { ptr } -%"struct.std::coroutine_handle.0" = type { %"struct.std::coroutine_handle" } -%"struct.lean_future<int>::Awaiter" = type { i32, %"struct.std::coroutine_handle.0" } - -declare ptr @malloc(i64) - -%i8.array = type { [100 x i8] } -declare void @consume.i8(ptr) - -; The testval lives across suspend point so that it should be put on the frame. -; However, part of testval has lifetime marker which indicates the part -; wouldn't live across suspend point. -; This test whether or not %testval would be put on the frame by ignoring the -; partial lifetime markers. -define void @foo(ptr %to_store) presplitcoroutine { -entry: - %testval = alloca %i8.array - %subrange = getelementptr inbounds %i8.array, ptr %testval, i64 0, i32 0, i64 50 - %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null) - %alloc = call ptr @malloc(i64 16) #3 - %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc) - - call void @llvm.lifetime.start.p0(i64 50, ptr %subrange) - call void @consume.i8(ptr %subrange) - call void @llvm.lifetime.end.p0(i64 50, ptr %subrange) - store ptr %testval, ptr %to_store - - %save = call token @llvm.coro.save(ptr null) - %suspend = call i8 @llvm.coro.suspend(token %save, i1 false) - switch i8 %suspend, label %exit [ - i8 0, label %await.ready - i8 1, label %exit - ] -await.ready: - %StrayCoroSave = call token @llvm.coro.save(ptr null) - br label %exit -exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) - ret void -} - -; Verify that for both foo and bar, testval isn't put on the frame. 
-; CHECK: %foo.Frame = type { ptr, ptr, %i8.array, i1 } - -declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) -declare i1 @llvm.coro.alloc(token) #3 -declare i64 @llvm.coro.size.i64() #5 -declare ptr @llvm.coro.begin(token, ptr writeonly) #3 -declare token @llvm.coro.save(ptr) #3 -declare ptr @llvm.coro.frame() #5 -declare i8 @llvm.coro.suspend(token, i1) #3 -declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4 -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll b/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll index f828b22..8b8dbac 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll @@ -50,7 +50,7 @@ entry: br label %loop loop: - call void @llvm.lifetime.start(i64 8, ptr %stackvar0) + call void @llvm.lifetime.start(ptr %stackvar0) store i64 1234, ptr %stackvar0 @@ -58,7 +58,7 @@ loop: ; %stackvar1 and rely on it staying the same across suspension. call void @bar() - call void @llvm.lifetime.end(i64 8, ptr %stackvar0) + call void @llvm.lifetime.end(ptr %stackvar0) %save = call token @llvm.coro.save(ptr null) %suspend = call i8 @llvm.coro.suspend(token %save, i1 false) @@ -81,5 +81,5 @@ declare ptr @llvm.coro.begin(token, ptr writeonly) declare token @llvm.coro.save(ptr) declare i8 @llvm.coro.suspend(token, i1) declare i1 @llvm.coro.end(ptr, i1, token) -declare void @llvm.lifetime.start(i64, ptr nocapture) -declare void @llvm.lifetime.end(i64, ptr nocapture) +declare void @llvm.lifetime.start(ptr nocapture) +declare void @llvm.lifetime.end(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll index 07b3bd8..d662638 100644 --- a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll +++ b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll @@ -49,7 +49,7 @@ entry: %id = call token @llvm.coro.id.async(i32 128, i32 16, i32 0, ptr @my_async_function_fp) %hdl = call ptr @llvm.coro.begin(token %id, ptr null) - call void @llvm.lifetime.start.p0(i64 4, ptr %escaped_addr) + call void @llvm.lifetime.start.p0(ptr %escaped_addr) call void @escape(ptr %escaped_addr) br label %callblock @@ -80,6 +80,6 @@ declare void @llvm.coro.async.context.dealloc(ptr) declare swiftcc void @asyncSuspend(ptr) declare ptr @llvm.coro.async.resume() declare void @llvm.coro.async.size.replace(ptr, ptr) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0 +declare void @llvm.lifetime.start.p0(ptr nocapture) #0 +declare void @llvm.lifetime.end.p0(ptr nocapture) #0 attributes #0 = { argmemonly nofree nosync nounwind willreturn } diff --git a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll index 4010159..49c4207 100644 --- a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll +++ b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll @@ -43,7 +43,7 @@ entry: %id = call token @llvm.coro.id.async(i32 128, i32 16, i32 0, ptr @my_async_function_fp) %hdl = call ptr @llvm.coro.begin(token %id, ptr null) 
- call void @llvm.lifetime.start.p0(i64 4, ptr %escaped_addr)
+ call void @llvm.lifetime.start.p0(ptr %escaped_addr)
 br label %callblock
@@ -81,7 +81,7 @@ loop:
 br label %callblock
 loop_exit:
- call void @llvm.lifetime.end.p0(i64 4, ptr %escaped_addr)
+ call void @llvm.lifetime.end.p0(ptr %escaped_addr)
 call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 false)
 unreachable
 }
@@ -104,6 +104,6 @@ declare void @llvm.coro.async.context.dealloc(ptr)
 declare swiftcc void @asyncSuspend(ptr)
 declare ptr @llvm.coro.async.resume()
 declare void @llvm.coro.async.size.replace(ptr, ptr)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
 attributes #0 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/Coroutines/coro-byval-param.ll b/llvm/test/Transforms/Coroutines/coro-byval-param.ll
index 4705918..38ab5ac 100644
--- a/llvm/test/Transforms/Coroutines/coro-byval-param.ll
+++ b/llvm/test/Transforms/Coroutines/coro-byval-param.ll
@@ -19,7 +19,7 @@ coro.alloc: ; preds = %entry
 coro.init: ; preds = %coro.alloc, %entry
 %3 = phi ptr [ null, %entry ], [ %call, %coro.alloc ]
 %4 = call ptr @llvm.coro.begin(token %0, ptr %3) #10
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise) #2
 %call2 = call ptr @_ZN4task12promise_type17get_return_objectEv(ptr nonnull dereferenceable(1) %__promise)
 call void @initial_suspend(ptr nonnull dereferenceable(1) %__promise)
 %5 = call token @llvm.coro.save(ptr null)
@@ -31,9 +31,9 @@ coro.init: ; preds = %coro.alloc, %entry
 ]
 init.ready: ; preds = %coro.init
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %a2) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %a2) #2
 call void @llvm.memcpy.p0.p0.i64(ptr align 8 %a2, ptr align 8 %a1, i64 24, i1 false)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %a2) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %a2) #2
 call void @_ZN4task12promise_type13final_suspendEv(ptr nonnull dereferenceable(1) %__promise) #2
 %7 = call token @llvm.coro.save(ptr null)
 call fastcc void @_ZNSt12experimental13coroutines_v116coroutine_handleIN4task12promise_typeEE12from_addressEPv(ptr %4) #2
@@ -42,7 +42,7 @@ init.ready: ; preds = %coro.init
 br i1 %switch, label %cleanup33, label %coro.ret
 cleanup33: ; preds = %init.ready, %coro.init
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %__promise) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %__promise) #2
 %9 = call ptr @llvm.coro.free(token %0, ptr %4)
 %.not = icmp eq ptr %9, null
 br i1 %.not, label %coro.ret, label %coro.free
@@ -75,7 +75,7 @@ declare i64 @llvm.coro.size.i64() #4
 declare ptr @llvm.coro.begin(token, ptr writeonly) #2
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.start.p0(ptr nocapture) #5
 ; Function Attrs: argmemonly nofree nounwind willreturn
 declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #6
@@ -93,7 +93,7 @@ declare token @llvm.coro.save(ptr) #2
 declare hidden fastcc void @_ZNSt12experimental13coroutines_v116coroutine_handleIN4task12promise_typeEE12from_addressEPv(ptr) unnamed_addr #7 align 2
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.end.p0(ptr nocapture) #5
 ; Function Attrs: nounwind
 declare i8 @llvm.coro.suspend(token, i1) #2
diff --git a/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll b/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll
index 6c6e5a6..d369a21 100644
--- a/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll
+++ b/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll
@@ -48,7 +48,7 @@ entry:
 }
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
 ; Function Attrs: argmemonly nounwind readonly
 declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #1
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll
index c9700c8..bf08d6f 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll
@@ -16,33 +16,33 @@ entry:
 br i1 %cond, label %then, label %else
 then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data)
 call void @consume(ptr %data)
 %suspend.value = call i8 @llvm.coro.suspend(token none, i1 false)
 switch i8 %suspend.value, label %coro.ret [i8 0, label %resume
 i8 1, label %cleanup1]
 resume:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
 br label %cleanup1
 cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
 br label %cleanup
 else:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data2)
 call void @consume(ptr %data2)
 %suspend.value2 = call i8 @llvm.coro.suspend(token none, i1 false)
 switch i8 %suspend.value2, label %coro.ret [i8 0, label %resume2
 i8 1, label %cleanup2]
 resume2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
 br label %cleanup2
 cleanup2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
 br label %cleanup
 cleanup:
@@ -72,5 +72,5 @@ declare noalias ptr @malloc(i32)
 declare double @print(double)
 declare void @free(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll
index 584caa35..78c6f0c 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll
@@ -17,10 +17,10 @@ entry:
 br label %init.ready
 init.ready:
 %1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
 br i1 %cond, label %if.then, label %if.else
 if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
 call void @consume(ptr nonnull %a)
 %save = call token @llvm.coro.save(ptr null)
 %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -29,10 +29,10 @@ if.then:
 i8 1, label %cleanup1
 ]
 await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 br label %cleanup1
 if.else:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
 call void @consume(ptr nonnull %b)
 %save2 = call token @llvm.coro.save(ptr null)
 %suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -41,13 +41,13 @@ if.else:
 i8 1, label %cleanup2
 ]
 await2.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
 br label %cleanup2
 cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 br label %cleanup
 cleanup2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
 br label %cleanup
 cleanup:
 call ptr @llvm.coro.free(token %0, ptr %1)
@@ -69,5 +69,5 @@ declare ptr @llvm.coro.frame() #5
 declare i8 @llvm.coro.suspend(token, i1) #3
 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
 declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll
index f916ebb..8265731 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll
@@ -19,10 +19,10 @@ entry:
 br label %init.ready
 init.ready:
 %1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
 br i1 %cond, label %if.then, label %if.else
 if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
 call void @consume(ptr nonnull %a)
 %save = call token @llvm.coro.save(ptr null)
 %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -31,10 +31,10 @@ if.then:
 i8 1, label %cleanup1
 ]
 await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 br label %cleanup1
 if.else:
- call void @llvm.lifetime.start.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
 call void @consume.2(ptr nonnull %b)
 %save2 = call token @llvm.coro.save(ptr null)
 %suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -43,13 +43,13 @@ if.else:
 i8 1, label %cleanup2
 ]
 await2.ready:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
 br label %cleanup2
 cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 br label %cleanup
 cleanup2:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
 br label %cleanup
 cleanup:
 call ptr @llvm.coro.free(token %0, ptr %1)
@@ -70,5 +70,5 @@ declare ptr @llvm.coro.frame() #5
 declare i8 @llvm.coro.suspend(token, i1) #3
 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
 declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll
index 525df87..66d4137 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll
@@ -16,33 +16,33 @@ entry:
 br i1 %cond, label %then, label %else
 then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data)
 call void @consume(ptr %data)
 %suspend.value = call i8 @llvm.coro.suspend(token none, i1 false)
 switch i8 %suspend.value, label %coro.ret [i8 0, label %resume
 i8 1, label %cleanup1]
 resume:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
 br label %cleanup1
 cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
 br label %cleanup
 else:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data2)
 call void @consume(ptr %data2)
 %suspend.value2 = call i8 @llvm.coro.suspend(token none, i1 false)
 switch i8 %suspend.value2, label %coro.ret [i8 0, label %resume2
 i8 1, label %cleanup2]
 resume2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
 br label %cleanup2
 cleanup2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
 br label %cleanup
 cleanup:
@@ -72,5 +72,5 @@ declare noalias ptr @malloc(i32)
 declare double @print(double)
 declare void @free(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll
index 27e0c47..6ff31e5 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll
@@ -19,10 +19,10 @@ entry:
 br label %init.ready
 init.ready:
 %1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
 br i1 %cond, label %if.then, label %if.else
 if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
 call void @consume(ptr nonnull %a)
 %save = call token @llvm.coro.save(ptr null)
 %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -31,10 +31,10 @@ if.then:
 i8 1, label %cleanup1
 ]
 await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 br label %cleanup1
 if.else:
- call void @llvm.lifetime.start.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
 call void @consume.2(ptr nonnull %b)
 %save2 = call token @llvm.coro.save(ptr null)
 %suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -43,13 +43,13 @@ if.else:
 i8 1, label %cleanup2
 ]
 await2.ready:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
 br label %cleanup2
 cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 br label %cleanup
 cleanup2:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
 br label %cleanup
 cleanup:
 call ptr @llvm.coro.free(token %0, ptr %1)
@@ -70,5 +70,5 @@ declare ptr @llvm.coro.frame() #5
 declare i8 @llvm.coro.suspend(token, i1) #3
 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
 declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll
index 6d93eea..c3da8e8 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll
@@ -19,10 +19,10 @@ entry:
 br label %init.ready
 init.ready:
 %1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
 br i1 %cond, label %if.then, label %if.else
 if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
 call void @consume(ptr nonnull %a)
 %save = call token @llvm.coro.save(ptr null)
 %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -31,10 +31,10 @@ if.then:
 i8 1, label %cleanup1
 ]
 await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 br label %cleanup1
 if.else:
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
 call void @consume.2(ptr nonnull %b)
 %save2 = call token @llvm.coro.save(ptr null)
 %suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -43,13 +43,13 @@ if.else:
 i8 1, label %cleanup2
 ]
 await2.ready:
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
 br label %cleanup2
 cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 br label %cleanup
 cleanup2:
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
 br label %cleanup
 cleanup:
 call ptr @llvm.coro.free(token %0, ptr %1)
@@ -70,5 +70,5 @@ declare ptr @llvm.coro.frame() #5
 declare i8 @llvm.coro.suspend(token, i1) #3
 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
 declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
index 8d0e772..df2ed7e 100644
--- a/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
+++ b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
@@ -31,7 +31,7 @@ entry:
 %alloc = call ptr @malloc(i64 16) #3
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
 call void @consume.i8.array(ptr %testval)
 %save = call token @llvm.coro.save(ptr null)
@@ -68,7 +68,7 @@ entry:
 %alloc = call ptr @malloc(i64 16) #3
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
 call void @consume.i8.array(ptr %testval)
 %save = call token @llvm.coro.save(ptr null)
@@ -81,7 +81,7 @@ await.ready:
 br label %exit
 exit:
 call i1 @llvm.coro.end(ptr null, i1 false, token none)
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
 ret void
 }
@@ -107,7 +107,7 @@ entry:
 %alloc = call ptr @malloc(i64 16) #3
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
 call void @consume.i8.array(ptr %testval)
 %0 = load i8, ptr @testbool, align 1
@@ -115,7 +115,7 @@ entry:
 br i1 %tobool, label %if.then, label %if.end
 if.then:
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
 br label %if.end
 if.end:
@@ -138,5 +138,5 @@ declare ptr @llvm.coro.begin(token, ptr writeonly) #3
 declare ptr @llvm.coro.frame() #5
 declare i8 @llvm.coro.suspend(token, i1) #3
 declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll b/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll
index 3f0899a..c3d0fb1 100644
--- a/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll
+++ b/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll
@@ -19,7 +19,7 @@ coro.alloc: ; preds = %entry
 init.suspend: ; preds = %entry, %coro.alloc
 %3 = phi ptr [ null, %entry ], [ %call, %coro.alloc ]
 %4 = call ptr @llvm.coro.begin(token %0, ptr %3) #12
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %__promise) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise) #3
 store ptr null, ptr %__promise, align 8
 %5 = call token @llvm.coro.save(ptr null)
 %6 = call i8 @llvm.coro.suspend(token %5, i1 false)
@@ -80,7 +80,7 @@ cleanup3:
 br label %cleanup62
 cleanup62: ; preds = %await2.suspend, %await.suspend, %init.suspend, %final.suspend
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %__promise) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %__promise) #3
 %18 = call ptr @llvm.coro.free(token %0, ptr %4)
 %.not = icmp eq ptr %18, null
 br i1 %.not, label %coro.ret, label %coro.free
@@ -99,9 +99,9 @@ declare i1 @llvm.coro.alloc(token) #3
 declare dso_local noundef nonnull ptr @_Znwm(i64 noundef) local_unnamed_addr #4
 declare i64 @llvm.coro.size.i64() #5
 declare ptr @llvm.coro.begin(token, ptr writeonly) #3
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #6
+declare void @llvm.lifetime.start.p0(ptr nocapture) #6
 declare token @llvm.coro.save(ptr) #7
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #6
+declare void @llvm.lifetime.end.p0(ptr nocapture) #6
 declare i8 @llvm.coro.suspend(token, i1) #3
 declare ptr @_Z5Innerv() local_unnamed_addr
 declare dso_local void @_ZdlPv(ptr noundef) local_unnamed_addr #8
diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll b/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll
index 8ed0384..31839aa 100644
--- a/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll
@@ -37,9 +37,9 @@ declare token @llvm.coro.id.retcon.once(i32, i32, ptr, ptr, ptr, ptr) #5
 declare ptr @llvm.coro.begin(token, ptr writeonly) #5
 declare token @llvm.coro.alloca.alloc.i64(i64, i32) #5
 declare ptr @llvm.coro.alloca.get(token) #5
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #6
+declare void @llvm.lifetime.start.p0(ptr nocapture) #6
 declare i1 @llvm.coro.suspend.retcon.i1(...) #5
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #6
+declare void @llvm.lifetime.end.p0(ptr nocapture) #6
 declare void @llvm.coro.alloca.free(token) #5
 declare i1 @llvm.coro.end(ptr, i1, token) #5
diff --git a/llvm/test/Transforms/Coroutines/coro-split-02.ll b/llvm/test/Transforms/Coroutines/coro-split-02.ll
index 31e8e81..c487ab1 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-02.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-02.ll
@@ -27,10 +27,10 @@ entry:
 await.ready:
 %StrayCoroSave = call token @llvm.coro.save(ptr null)
 %val = load i32, ptr %ref.tmp7
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
 %test = load i32, ptr %testval
 call void @print(i32 %test)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
 call void @print(i32 %val)
 br label %exit
 exit:
@@ -42,10 +42,10 @@ exit:
 ; CHECK: %testval = alloca i32
 ; CHECK-NOT: call token @llvm.coro.save(ptr null)
 ; CHECK: %val = load i32, ptr %ref.tmp7
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %testval)
 ; CHECK-NEXT: %test = load i32, ptr %testval
 ; CHECK-NEXT: call void @print(i32 %test)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
 ; CHECK-NEXT: call void @print(i32 %val)
 ; CHECK-NEXT: ret void
@@ -61,5 +61,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
 declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
index 184d4a5..9a9e3c3 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
@@ -43,7 +43,7 @@ coro_Suspend: ; preds = %for.cond, %if.then,
 }
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
 ; Function Attrs: argmemonly nounwind readonly
 declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #5
@@ -54,7 +54,7 @@ declare i64 @llvm.coro.size.i64() #1
 declare ptr @llvm.coro.begin(token, ptr writeonly) #7
 declare token @llvm.coro.save(ptr) #7
 declare i8 @llvm.coro.suspend(token, i1) #7
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #5
 declare void @free(ptr nocapture) local_unnamed_addr #6
 declare i1 @llvm.coro.end(ptr, i1, token) #7
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll
index e2ed205..e661932 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll
@@ -33,8 +33,8 @@ declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8)
 declare i8 @llvm.coro.suspend(token, i1)
 declare void @llvm.instrprof.increment(ptr, i64, i32, i32)
 declare void @llvm.instrprof.value.profile(ptr, i64, i64, i32, i32)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 ; Function Attrs: noinline nounwind presplitcoroutine uwtable
 define ptr @f(i32 %0) presplitcoroutine align 32 {
@@ -56,11 +56,11 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
 12: ; preds = %8, %1
 %13 = phi ptr [ null, %1 ], [ %11, %8 ]
 %14 = call ptr @llvm.coro.begin(token %6, ptr %13) #28
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %3) #9
 store ptr null, ptr %3, align 16
 %15 = getelementptr inbounds {ptr, i64}, ptr %3, i64 0, i32 1
 store i64 0, ptr %15, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %4) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %4) #9
 store ptr %3, ptr %4, align 8
 %16 = call token @llvm.coro.save(ptr null)
 call void @await_suspend(ptr noundef nonnull align 1 dereferenceable(1) %4, ptr %14) #9
@@ -71,7 +71,7 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
 ]
 18: ; preds = %12
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %4) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %4) #9
 %19 = icmp slt i32 0, %0
 br i1 %19, label %20, label %36
@@ -79,12 +79,12 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
 br label %22
 21: ; preds = %12
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %4) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %4) #9
 br label %54
 22: ; preds = %20, %31
 %23 = phi i32 [ 0, %20 ], [ %32, %31 ]
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %5) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %5) #9
 %24 = call ptr @other_coro()
 store ptr %3, ptr %5, align 8
 %25 = getelementptr inbounds { ptr, ptr }, ptr %5, i64 0, i32 1
@@ -98,13 +98,13 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
 ]
 31: ; preds = %22
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %5) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %5) #9
 %32 = add nuw nsw i32 %23, 1
 %33 = icmp slt i32 %32, %0
 br i1 %33, label %22, label %35, !llvm.loop !0
 34: ; preds = %22
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %5) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %5) #9
 br label %54
 35: ; preds = %31
@@ -142,11 +142,11 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
 br label %54
 53: ; preds = %47
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %2) #9
 unreachable
 54: ; preds = %52, %34, %21
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %3) #9
 %55 = call ptr @llvm.coro.free(token %6, ptr %14)
 %56 = icmp eq ptr %55, null
 br i1 %56, label %61, label %57
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll
index 7c1a13f..b256175 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll
@@ -9,7 +9,7 @@ entry:
 %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
 %alloc = call ptr @malloc(i64 16) #3
 %alloc.var = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
 %save = call token @llvm.coro.save(ptr null)
@@ -29,7 +29,7 @@ await.suspend:
 ]
 await.ready:
 call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
 br label %exit
 exit:
 call i1 @llvm.coro.end(ptr null, i1 false, token none)
@@ -53,8 +53,8 @@ declare i1 @llvm.coro.end(ptr, i1, token) #2
 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1
 declare ptr @malloc(i64)
 declare void @consume(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl)
 attributes #0 = { presplitcoroutine }
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
index e05169a..99174ff 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
@@ -13,7 +13,7 @@ entry:
 %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
 %alloc = call ptr @malloc(i64 16) #3
 %alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
 %save = call token @llvm.coro.save(ptr null)
@@ -33,7 +33,7 @@ await.suspend:
 ]
 await.ready:
 call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
 br label %exit
 exit:
 call i1 @llvm.coro.end(ptr null, i1 false, token none)
@@ -51,7 +51,7 @@ entry:
 %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
 %alloc = call ptr @malloc(i64 16) #3
 %alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
 %save = call token @llvm.coro.save(ptr null)
@@ -71,7 +71,7 @@ await.suspend:
 ]
 await.ready:
 call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
 br label %exit
 cleanup:
@@ -106,8 +106,8 @@ declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1
 declare ptr @malloc(i64)
 declare void @delete(ptr nonnull) #2
 declare void @consume(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl)
 attributes #0 = { presplitcoroutine }
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
index 8ceb0dd..91f8543 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
@@ -11,7 +11,7 @@ entry:
 %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
 %alloc = call ptr @malloc(i64 16) #3
 %alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
 %save = call token @llvm.coro.save(ptr null)
@@ -36,7 +36,7 @@ await.suspend:
 ]
 await.ready:
 call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
 br label %exit
 exit:
 %result = phi i64 [0, %entry], [0, %entry], [%foo, %await.suspend], [%foo, %await.suspend], [%foo, %await.ready]
@@ -57,7 +57,7 @@ entry:
 %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
 %alloc = call ptr @malloc(i64 16) #3
 %alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
 %save = call token @llvm.coro.save(ptr null)
@@ -77,7 +77,7 @@ await.suspend:
 ]
 await.ready:
 call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
 br label %exit
 cleanup:
@@ -114,8 +114,8 @@ declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1
 declare ptr @malloc(i64)
 declare void @delete(ptr nonnull) #2
 declare void @consume(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl)
 attributes #0 = { presplitcoroutine }
diff --git a/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll b/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll
index 157807d..12d6564 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll
@@ -12,11 +12,11 @@ entry:
 br i1 %n, label %flag_true, label %flag_false
 flag_true:
- call void @llvm.lifetime.start.p0(i64 8, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
 br label %merge
 flag_false:
- call void @llvm.lifetime.start.p0(i64 8, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
 br label %merge
 merge:
@@ -27,8 +27,8 @@ merge:
 i8 1, label %cleanup]
 resume:
 call void @print(ptr %phi)
- call void @llvm.lifetime.end.p0(i64 8, ptr %x)
- call void @llvm.lifetime.end.p0(i64 8, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %y)
 br label %cleanup
 cleanup:
@@ -54,8 +54,8 @@ declare i1 @llvm.coro.alloc(token)
 declare ptr @llvm.coro.begin(token, ptr)
 declare i1 @llvm.coro.end(ptr, i1, token)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare void @print(ptr)
 declare noalias ptr @malloc(i32)
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll
index 1d0cf94..a5a2bcf 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll
@@ -17,7 +17,7 @@ entry:
 %ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
 %testval = alloca i32
 ; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
 %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
 %alloc = call ptr @malloc(i64 16) #3
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -33,7 +33,7 @@ await.ready:
 %val = load i32, ptr %ref.tmp7
 %test = load i32, ptr %testval
 call void @print(i32 %test)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
 call void @print(i32 %val)
 br label %exit
 exit:
@@ -43,11 +43,11 @@ exit:
 ; CHECK-LABEL: @a.resume(
 ; CHECK: %testval = alloca i32, align 4
-; CHECK: call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %testval)
 ; CHECK-NEXT: %val = load i32, ptr %ref.tmp7
 ; CHECK-NEXT: %test = load i32, ptr %testval
 ; CHECK-NEXT: call void @print(i32 %test)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
 ; CHECK-NEXT: call void @print(i32 %val)
 ; CHECK-NEXT: ret void
@@ -56,7 +56,7 @@ entry:
 %ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
 %testval = alloca i32
 ; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
 %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
 %alloc = call ptr @malloc(i64 16) #3
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -72,7 +72,7 @@ await.ready:
 %val = load i32, ptr %ref.tmp7
 %test = load i32, ptr %testval
 call void @print(i32 %test)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
 call void @print(i32 %val)
 br label %exit
 exit:
@@ -92,5 +92,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
 declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll
index 38a2a33..abc91c3 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll
@@ -15,7 +15,7 @@ entry:
 %ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
 %testval = alloca i32
 ; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
 %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
 %alloc = call ptr @malloc(i64 16) #3
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -44,7 +44,7 @@ await.ready:
 after.await:
 %test1 = load i32, ptr %testval
 call void @print(i32 %test1)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
 br label %exit
 exit:
@@ -54,7 +54,7 @@ exit:
 ; CHECK-LABEL: @a.resume(
 ; CHECK: %[[VAL:testval.+]] = getelementptr inbounds %a.Frame
-; CHECK-NOT: call void @llvm.lifetime.start.p0(i64 4, ptr %{{.*}})
+; CHECK-NOT: call void @llvm.lifetime.start.p0(ptr %{{.*}})
 ; CHECK: %test = load i32, ptr %[[VAL]]
 declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
@@ -69,5 +69,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
 declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll
index de377a6..efd1adf 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll
@@ -17,7 +17,7 @@ entry:
 %ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
 %testval = alloca %i8.array
 ; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
 %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
 %alloc = call ptr @malloc(i64 16) #3
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -32,7 +32,7 @@ await.ready:
 %StrayCoroSave = call token @llvm.coro.save(ptr null)
 %val = load i32, ptr %ref.tmp7
 call void @consume.i8.array(ptr %testval)
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
 call void @print(i32 %val)
 br label %exit
 exit:
@@ -41,10 +41,10 @@ exit:
 }
 ; CHECK-LABEL: @a.gep.resume(
 ; CHECK: %testval = alloca %i8.array
-; CHECK: call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %testval)
 ; CHECK-NEXT: %val = load i32, ptr %ref.tmp7
 ; CHECK-NEXT: call void @consume.i8.array(ptr %testval)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
 ; CHECK-NEXT: call void @print(i32 %val)
 ; CHECK-NEXT: ret void
@@ -60,5 +60,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
 declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll
index 8210455..af5aa8a 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll
@@ -15,7 +15,7 @@ entry:
 %ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
 %testval = alloca i8
 ; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 1, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
 %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
 %alloc = call ptr @malloc(i64 16) #3
 %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -31,7 +31,7 @@ await.ready:
 %val = load i32, ptr %ref.tmp7
 %test = load i8, ptr %testval
 call void @consume.i8(i8 %test)
- call void @llvm.lifetime.end.p0(i64 1, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
 call void @print(i32 %val)
 br label %exit
 exit:
@@ -41,11 +41,11 @@ exit:
 ; CHECK-LABEL: @a.resume(
 ; CHECK: %testval = alloca i8, align 1
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %testval)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %testval)
 ; CHECK-NEXT: %val = load i32, ptr %ref.tmp7
 ; CHECK-NEXT: %test = load i8, ptr %testval
 ; CHECK-NEXT: call void @consume.i8(i8 %test)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
 ; CHECK-NEXT: call void @print(i32 %val)
 ; CHECK-NEXT: ret void
@@ -62,5 +62,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
 declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll b/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll
index d2c4f57..4eec7ed 100644
--- a/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll
+++ b/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll
@@ -61,11 +61,11 @@ entry:
 ret ptr %task
 ; CHECK: %[[TASK:.+]] = alloca %struct.Task, align 8
 ; CHECK-NEXT: %[[FRAME:.+]] = alloca [32 x i8], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %[[TASK]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %[[TASK]])
 ; CHECK-NEXT: %[[ID:.+]] = call token @llvm.coro.id(i32 0, ptr null, ptr @callee, ptr @callee.resumers)
 ; CHECK-NEXT: %[[HDL:.+]] = call ptr @llvm.coro.begin(token %[[ID]], ptr null)
 ; CHECK-NEXT: store ptr %[[HDL]], ptr %[[TASK]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %[[TASK]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %[[TASK]])
 ; CHECK-NEXT: ret ptr %[[TASK]]
 }
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll b/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll
index 9e47bd2..390e96e 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll
@@ -12,14 +12,14 @@ target triple = "x86_64-unknown-linux-gnu"
 @.str = private unnamed_addr constant [8 x i8] c"a = %l\0A\00", align 1
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
 declare void @hoo(ptr)
 declare i32 @printf(ptr nocapture readonly, ...)
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 define void @goo(i32 %N, ptr %b) {
 entry:
@@ -32,12 +32,12 @@ for.cond: ; preds = %for.body, %entry
 br i1 %cmp, label %for.body, label %for.end
 for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.i)
+ call void @llvm.lifetime.start.p0(ptr %a.i)
 call void @hoo(ptr %a.i)
 call void @hoo(ptr %b)
 %tmp1 = load volatile i64, ptr %a.i, align 8
 %call.i = call i32 (ptr, ...) @printf(ptr @.str, i64 %tmp1)
- call void @llvm.lifetime.end.p0(i64 8, ptr %a.i)
+ call void @llvm.lifetime.end.p0(ptr %a.i)
 %inc = add nsw i32 %i.0, 1
 br label %for.cond
diff --git a/llvm/test/Transforms/DCE/basic.ll b/llvm/test/Transforms/DCE/basic.ll
index 1a3b12e..28772f0 100644
--- a/llvm/test/Transforms/DCE/basic.ll
+++ b/llvm/test/Transforms/DCE/basic.ll
@@ -10,8 +10,8 @@ define void @test() {
 ret void
 }
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
 ; CHECK-LABEL: @test_lifetime_alloca
 define i32 @test_lifetime_alloca() {
@@ -21,8 +21,8 @@ define i32 @test_lifetime_alloca() {
 ; CHECK-NOT: llvm.lifetime.start
 ; CHECK-NOT: llvm.lifetime.end
 %i = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
 ret i32 0
 }
diff --git a/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll b/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll
index ee9bd69..4ec69bc 100644
--- a/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll
@@ -12,17 +12,17 @@ define ptr @foo(ptr noundef %ptr) {
 ; CHECK-LABEL: define ptr @foo(
 ; CHECK-SAME: ptr noundef [[PTR:%.*]]) {
 ; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6:[0-9]+]]
 ; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_BYTE_8]], i64 4
 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 [[TMP1]], i8 42, i64 4, i1 false)
 ; CHECK-NEXT: store i32 43, ptr [[STRUCT_BYTE_8]], align 4
 ; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
 ; CHECK-NEXT: ret ptr [[RET]]
 ;
 %struct.alloca = alloca %struct.type, align 8
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
 %struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
 ; Set %struct.alloca[8, 16) to 42.
 call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 %struct.byte.8, i8 42, i64 8, i1 false)
@@ -33,7 +33,7 @@ define ptr @foo(ptr noundef %ptr) {
 store i32 44, ptr %struct.byte.4, align 4
 ; Return %struct.alloca[8, 16).
 %ret = load ptr, ptr %struct.byte.8
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
 ret ptr %ret
 }
@@ -44,7 +44,7 @@ define ptr @foo(ptr noundef %ptr) {
 define ptr @foo_with_removable_malloc() {
 ; CHECK-LABEL: define ptr @foo_with_removable_malloc() {
 ; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
 ; CHECK-NEXT: [[STRUCT_BYTE_4:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 4
 ; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_BYTE_8]], i64 4
@@ -53,11 +53,11 @@ define ptr @foo_with_removable_malloc() {
 ; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
 ; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_4]])
 ; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_8]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
 ; CHECK-NEXT: ret ptr [[RET]]
 ;
 %struct.alloca = alloca %struct.type, align 8
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
 %struct.byte.4 = getelementptr inbounds i8, ptr %struct.alloca, i64 4
 %struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
@@ -79,7 +79,7 @@ define ptr @foo_with_removable_malloc() {
 %ret = load ptr, ptr %struct.byte.8
 call void @readnone(ptr %struct.byte.4);
 call void @readnone(ptr %struct.byte.8);
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
 ret ptr %ret
 }
@@ -87,7 +87,7 @@ define ptr @foo_with_removable_malloc_free() {
 ; CHECK-LABEL: define ptr @foo_with_removable_malloc_free() {
 ; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
 ; CHECK-NEXT: [[M1:%.*]] = tail call ptr @malloc(i64 4)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
 ; CHECK-NEXT: [[STRUCT_BYTE_4:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 4
 ; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
 ; CHECK-NEXT: [[M2:%.*]] = tail call ptr @malloc(i64 4)
@@ -99,12 +99,12 @@ define ptr @foo_with_removable_malloc_free() {
 ; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
 ; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_4]])
 ; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_8]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
 ; CHECK-NEXT: ret ptr [[RET]]
 ;
 %struct.alloca = alloca %struct.type, align 8
 %m1 = tail call ptr @malloc(i64 4)
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
 %struct.byte.4 = getelementptr inbounds i8, ptr %struct.alloca, i64 4
 %struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
@@ -126,14 +126,14 @@ define ptr @foo_with_removable_malloc_free() {
 %ret = load ptr, ptr %struct.byte.8
 call void @readnone(ptr %struct.byte.4);
 call void @readnone(ptr %struct.byte.8);
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
 ret ptr %ret
 }
 define ptr @foo_with_malloc_to_calloc() {
 ; CHECK-LABEL: define ptr @foo_with_malloc_to_calloc() {
 ; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
 ; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
 ; CHECK-NEXT: [[STRUCT_BYTE_4:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 4
 ; CHECK-NEXT: [[CALLOC1:%.*]] = call ptr @calloc(i64 1, i64 4)
@@ -144,13 +144,13 @@ define ptr @foo_with_malloc_to_calloc() {
 ; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
 ; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_4]])
 ; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_8]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
 ; CHECK-NEXT: call void @use(ptr [[CALLOC1]])
 ; CHECK-NEXT: call void @use(ptr [[CALLOC]])
 ; CHECK-NEXT: ret ptr [[RET]]
 ;
 %struct.alloca = alloca %struct.type, align 8
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
 %struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
 %struct.byte.4 = getelementptr inbounds i8, ptr %struct.alloca, i64 4
@@ -172,15 +172,15 @@ define ptr @foo_with_malloc_to_calloc() {
 %ret = load ptr, ptr %struct.byte.8
 call void @readnone(ptr %struct.byte.4);
 call void @readnone(ptr %struct.byte.8);
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
 call void @use(ptr %m1)
 call void @use(ptr %m2)
 ret ptr %ret
 }
 declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
 declare noalias ptr @malloc(i64) willreturn allockind("alloc,uninitialized") "alloc-family"="malloc"
 declare void @readnone(ptr) readnone nounwind
diff --git a/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll b/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
index 7d827fa..56c84c7 100644
--- a/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
@@ -865,7 +865,7 @@ exit:
 ret void
 }
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
 declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
 declare void @use.i64(i64)
@@ -883,7 +883,7 @@ define i64 @test_a_not_captured_at_all(ptr %ptr, ptr %ptr.2, i1 %c) {
 ; CHECK-NEXT: call void @use.i64(i64 [[LV_2]])
 ; CHECK-NEXT: br label [[EXIT]]
 ; CHECK: exit:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
 ; CHECK-NEXT: call void @clobber()
 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[A]], i8 0, i64 8, i1 false)
 ; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[A]], align 4
@@ -902,7 +902,7 @@ then:
 br label %exit
 exit:
- call void @llvm.lifetime.start.p0(i64 8, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 store i64 99, ptr %a
 call void @clobber()
 call void @llvm.memset.p0.i64(ptr %a, i8 0, i64 8, i1 false)
@@ -1112,7 +1112,7 @@ else:
 declare void @capture_and_clobber_multiple(ptr, ptr)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 define i64 @earliest_escape_ptrtoint(ptr %p.1) {
 ; CHECK-LABEL: @earliest_escape_ptrtoint(
@@ -1122,7 +1122,7 @@ define i64 @earliest_escape_ptrtoint(ptr %p.1) {
 ; CHECK-NEXT: [[LV_1:%.*]] = load ptr, ptr [[P_1:%.*]], align 8
 ; CHECK-NEXT: [[LV_2:%.*]] = load i64, ptr [[LV_1]], align 4
 ; CHECK-NEXT: store ptr [[A_1]], ptr [[P_1]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A_2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_2]])
 ; CHECK-NEXT: ret i64 [[LV_2]]
 ;
 entry:
@@ -1134,7 +1134,7 @@ entry:
 store ptr %a.1, ptr %p.1, align 8
 %int = ptrtoint ptr %a.2 to i64
 store i64 %int , ptr %a.2, align 8
- call void @llvm.lifetime.end.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.end.p0(ptr %a.2)
 ret i64 %lv.2
 }
diff --git a/llvm/test/Transforms/DeadStoreElimination/dominate.ll b/llvm/test/Transforms/DeadStoreElimination/dominate.ll
index 262d16e..7e3ddb3 100644
--- a/llvm/test/Transforms/DeadStoreElimination/dominate.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/dominate.ll
@@ -8,12 +8,12 @@ bb1:
 br label %bb3
 bb2:
- call void @llvm.lifetime.end.p0(i64 -1, ptr %memtmp3.i)
+ call void @llvm.lifetime.end.p0(ptr %memtmp3.i)
 br label %bb3
 bb3:
 call void @bar()
- call void @llvm.lifetime.end.p0(i64 -1, ptr %memtmp3.i)
+ call void @llvm.lifetime.end.p0(ptr %memtmp3.i)
 br label %bb4
 bb4:
@@ -21,4 +21,4 @@ bb4:
 }
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
diff --git a/llvm/test/Transforms/DeadStoreElimination/libcalls.ll b/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
index 27ad639..8225e14 100644
--- a/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
@@ -56,14 +56,14 @@ define void @test3(ptr %src) {
 define void @test_strcat_with_lifetime(ptr %src) {
 ; CHECK-LABEL: @test_strcat_with_lifetime(
 ; CHECK-NEXT: [[B:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[B]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[B]])
 ; CHECK-NEXT: ret void
 ;
 %B = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %B)
+ call void @llvm.lifetime.start.p0(ptr nonnull %B)
 %call = call ptr @strcat(ptr %B, ptr %src)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %B)
+ call void @llvm.lifetime.end.p0(ptr nonnull %B)
 ret void
 }
@@ -344,61 +344,61 @@ entry:
 define void @dse_strcpy(ptr nocapture readonly %src) {
 ; CHECK-LABEL: @dse_strcpy(
 ; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
 ; CHECK-NEXT: ret void
 ;
 %a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
 call ptr @strcpy(ptr nonnull %a, ptr nonnull dereferenceable(1) %src)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 ret void
 }
 define void @dse_strncpy(ptr nocapture readonly %src) {
 ; CHECK-LABEL: @dse_strncpy(
 ; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
 ; CHECK-NEXT: ret void
 ;
 %a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
 call ptr @strncpy(ptr nonnull %a, ptr nonnull dereferenceable(1) %src, i64 6)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 ret void
 }
 define void @dse_strcat(ptr nocapture readonly %src) {
 ; CHECK-LABEL: @dse_strcat(
 ; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
 ; CHECK-NEXT: ret void
 ;
 %a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
 call ptr @strcat(ptr nonnull %a, ptr nonnull dereferenceable(1) %src)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 ret void
 }
 define void @dse_strncat(ptr nocapture readonly %src) {
 ; CHECK-LABEL: @dse_strncat(
 ; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
 ; CHECK-NEXT: ret void
 ;
 %a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
 call ptr @strncat(ptr nonnull %a, ptr nonnull dereferenceable(1) %src, i64 6)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
 ret void
 }
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/Transforms/DeadStoreElimination/lifetime.ll b/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
index f2a372ea..3d74c84 100644
--- a/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
@@ -3,20 +3,20 @@
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
 declare void @llvm.memset.p0.i8(ptr nocapture, i8, i8, i1) nounwind
 define void @test1() {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
 ; CHECK-NEXT: ret void
 ;
 %A = alloca i8
 store i8 0, ptr %A ;; Written to by memset
- call void @llvm.lifetime.end.p0(i64 1, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
 call void @llvm.memset.p0.i8(ptr %A, i8 0, i8 -1, i1 false)
@@ -26,14 +26,14 @@ define void @test1() {
 define void @test2(ptr %P) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT: [[Q:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[Q]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[Q]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Q]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Q]])
 ; CHECK-NEXT: ret void
 ;
 %Q = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %Q)
+ call void @llvm.lifetime.start.p0(ptr %Q)
 store i32 0, ptr %Q ;; This store is dead.
- call void @llvm.lifetime.end.p0(i64 4, ptr %Q)
+ call void @llvm.lifetime.end.p0(ptr %Q)
 ret void
 }
diff --git a/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll b/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
index 7dd8e41..264e816 100644
--- a/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
@@ -15,7 +15,7 @@ define ptr @alloc_tree() {
 ; CHECK-LABEL: @alloc_tree(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[FVAL:%.*]] = alloca [4 x ptr], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[FVAL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[FVAL]])
 ; CHECK-NEXT: [[CALL:%.*]] = tail call dereferenceable_or_null(192) ptr @malloc(i64 192)
 ; CHECK-NEXT: [[CALL3:%.*]] = tail call ptr @alloc(ptr [[CALL]])
 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x ptr], ptr [[FVAL]], i64 0, i64 3
@@ -29,12 +29,12 @@ define ptr @alloc_tree() {
 ; CHECK-NEXT: [[CALL3_3:%.*]] = tail call ptr @alloc(ptr [[CALL]])
 ; CHECK-NEXT: store ptr [[CALL3_3]], ptr [[FVAL]], align 16
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(32) [[CALL]], ptr nonnull align 16 dereferenceable(32) [[FVAL]], i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[FVAL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[FVAL]])
 ; CHECK-NEXT: ret ptr [[CALL]]
 ;
 entry:
 %fval = alloca [4 x ptr], align 16
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %fval) #7
+ call void @llvm.lifetime.start.p0(ptr nonnull %fval) #7
 %call = tail call dereferenceable_or_null(192) ptr @malloc(i64 192) #8
 %call3 = tail call ptr @alloc(ptr %call)
 %arrayidx = getelementptr inbounds [4 x ptr], ptr %fval, i64 0, i64 3
@@ -48,11 +48,11 @@ entry:
 %call3.3 = tail call ptr @alloc(ptr %call)
 store ptr %call3.3, ptr %fval, align 16
 call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(32) %call, ptr nonnull align 16 dereferenceable(32) %fval, i64 32, i1 false)
- call void
@llvm.lifetime.end.p0(i64 32, ptr nonnull %fval) #7 + call void @llvm.lifetime.end.p0(ptr nonnull %fval) #7 ret ptr %call } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare noalias ptr @malloc(i64) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) diff --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll index f3f5cb1..112e9f4 100644 --- a/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll +++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll @@ -103,7 +103,7 @@ define void @test.2() { ; CHECK-NEXT: [[C_2:%.*]] = icmp slt i64 [[IV_2_NEXT]], 100 ; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_2]], label [[EXIT:%.*]] ; CHECK: exit: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 400, ptr nonnull [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]]) ; CHECK-NEXT: ret void ; entry: @@ -136,11 +136,11 @@ loop.2: br i1 %c.2, label %loop.2, label %exit exit: - call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %A) #5 + call void @llvm.lifetime.end.p0(ptr nonnull %A) #5 ret void } -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) ; Make sure `store i32 10, ptr %ptr.2` in %cond.store is not removed. The ; stored value may be read by `%use = load i32, ptr %ptr.1` in a future @@ -171,7 +171,7 @@ define void@test.3() { ; CHECK-NEXT: [[DEPTH_1_BE]] = phi i32 [ [[SUB]], [[COND_READ]] ], [ [[INC]], [[COND_STORE]] ] ; CHECK-NEXT: br label [[LOOP_HEADER]] ; CHECK: cleanup: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull [[NODESTACK]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[NODESTACK]]) ; CHECK-NEXT: ret void ; entry: @@ -203,7 +203,7 @@ loop.latch: br label %loop.header cleanup: ; preds = %while.body, %while.end, %entry - call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %nodeStack) #3 + call void @llvm.lifetime.end.p0(ptr nonnull %nodeStack) #3 ret void } diff --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll index d32d562..8ecc7939 100644 --- a/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll +++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll @@ -4,8 +4,8 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" declare void @unknown_func() -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i32, i1) nounwind diff --git a/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll b/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll index 3712bec..7293228 100644 --- a/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll +++ b/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll @@ -9,7 +9,7 @@ define void @test_nounwind_invoke() personality 
ptr @__gxx_personality_v0 { ; CHECK-NEXT: invoke void @foo(ptr [[TMP]]) ; CHECK-NEXT: to label [[BB1:%.*]] unwind label [[BB2:%.*]] ; CHECK: bb1: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP]]) ; CHECK-NEXT: ret void ; CHECK: bb2: ; CHECK-NEXT: [[ABCTMP1:%.*]] = landingpad { ptr, i32 } @@ -26,7 +26,7 @@ bb: to label %bb1 unwind label %bb2 bb1: ; preds = %bb - call void @llvm.lifetime.end.p0(i64 4, ptr %tmp) + call void @llvm.lifetime.end.p0(ptr %tmp) ret void bb2: ; preds = %bb @@ -36,7 +36,7 @@ bb2: ; preds = %bb } ; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0 +declare void @llvm.lifetime.end.p0(ptr nocapture) #0 ; Function Attrs: argmemonly nounwind willreturn declare void @foo(ptr) #1 declare i32 @__gxx_personality_v0(...) diff --git a/llvm/test/Transforms/DeadStoreElimination/simple.ll b/llvm/test/Transforms/DeadStoreElimination/simple.ll index 6c04e15..9d28395 100644 --- a/llvm/test/Transforms/DeadStoreElimination/simple.ll +++ b/llvm/test/Transforms/DeadStoreElimination/simple.ll @@ -697,26 +697,26 @@ define void @test39_atomic(ptr %P, ptr %Q, ptr %R) { declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i32) -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind define void @test40(ptr noalias %Pp, ptr noalias %Q) { ; CHECK-LABEL: @test40( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]]) ; CHECK-NEXT: [[PC:%.*]] = load ptr, ptr [[PP:%.*]], align 8 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 [[A]], ptr align 4 [[Q:%.*]], i64 4, i1 false) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[PC]], ptr nonnull align 4 [[A]], i64 4, i1 true) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]]) ; CHECK-NEXT: ret void ; entry: %A = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %A) + call void @llvm.lifetime.start.p0(ptr nonnull %A) %Pc = load ptr, ptr %Pp, align 8 call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 %A, ptr align 4 %Q, i64 4, i1 false) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %Pc, ptr nonnull align 4 %A, i64 4, i1 true) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %A) + call void @llvm.lifetime.end.p0(ptr nonnull %A) ret void } diff --git a/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll b/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll index df2feb0..0970ed3 100644 --- a/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll +++ b/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=dse -S < %s | FileCheck %s -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void 
@llvm.lifetime.end.p0(ptr nocapture) declare void @unknown() declare void @f(ptr) @@ -23,14 +23,14 @@ define void @test_dead() { define void @test_lifetime() { ; CHECK-LABEL: @test_lifetime( ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret void ; %a = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret void } @@ -39,18 +39,18 @@ define void @test_lifetime() { define void @test_lifetime2() { ; CHECK-LABEL: @test_lifetime2( ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) ; CHECK-NEXT: call void @unknown() ; CHECK-NEXT: call void @unknown() -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret void ; %a = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) call void @unknown() call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn call void @unknown() - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret void } diff --git a/llvm/test/Transforms/EarlyCSE/memoryssa.ll b/llvm/test/Transforms/EarlyCSE/memoryssa.ll index ba4cce4..f7f7ba3 100644 --- a/llvm/test/Transforms/EarlyCSE/memoryssa.ll +++ b/llvm/test/Transforms/EarlyCSE/memoryssa.ll @@ -146,12 +146,12 @@ define void @test_writeback_lifetimes() { ; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes( ; CHECK-NOMEMSSA-NEXT: entry: ; CHECK-NOMEMSSA-NEXT: [[P:%.*]] = alloca i64, align 8 -; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]]) ; CHECK-NOMEMSSA-NEXT: [[Q:%.*]] = getelementptr i32, ptr [[P]], i64 1 ; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NOMEMSSA-NEXT: [[QV:%.*]] = load i32, ptr [[Q]], align 4 -; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]]) -; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]]) +; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]]) ; CHECK-NOMEMSSA-NEXT: store i32 [[PV]], ptr [[P]], align 4 ; CHECK-NOMEMSSA-NEXT: store i32 [[QV]], ptr [[Q]], align 4 ; CHECK-NOMEMSSA-NEXT: ret void @@ -159,24 +159,24 @@ define void @test_writeback_lifetimes() { ; CHECK-LABEL: @test_writeback_lifetimes( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]]) ; CHECK-NEXT: [[Q:%.*]] = getelementptr i32, ptr [[P]], i64 1 ; CHECK-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: [[QV:%.*]] = load i32, ptr [[Q]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]]) +; CHECK-NEXT: call void 
@llvm.lifetime.start.p0(ptr [[P]]) ; CHECK-NEXT: store i32 [[PV]], ptr [[P]], align 4 ; CHECK-NEXT: store i32 [[QV]], ptr [[Q]], align 4 ; CHECK-NEXT: ret void ; entry: %p = alloca i64 - call void @llvm.lifetime.start.p0(i64 8, ptr %p) + call void @llvm.lifetime.start.p0(ptr %p) %q = getelementptr i32, ptr %p, i64 1 %pv = load i32, ptr %p %qv = load i32, ptr %q - call void @llvm.lifetime.end.p0(i64 8, ptr %p) - call void @llvm.lifetime.start.p0(i64 8, ptr %p) + call void @llvm.lifetime.end.p0(ptr %p) + call void @llvm.lifetime.start.p0(ptr %p) store i32 %pv, ptr %p store i32 %qv, ptr %q ret void @@ -188,11 +188,11 @@ define void @test_writeback_lifetimes_multi_arg(ptr %q) { ; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes_multi_arg( ; CHECK-NOMEMSSA-NEXT: entry: ; CHECK-NOMEMSSA-NEXT: [[P:%.*]] = alloca i64, align 8 -; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]]) ; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NOMEMSSA-NEXT: [[QV:%.*]] = load i32, ptr [[Q:%.*]], align 4 -; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]]) -; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]]) +; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]]) ; CHECK-NOMEMSSA-NEXT: store i32 [[PV]], ptr [[P]], align 4 ; CHECK-NOMEMSSA-NEXT: store i32 [[QV]], ptr [[Q]], align 4 ; CHECK-NOMEMSSA-NEXT: ret void @@ -200,25 +200,25 @@ define void @test_writeback_lifetimes_multi_arg(ptr %q) { ; CHECK-LABEL: @test_writeback_lifetimes_multi_arg( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]]) ; CHECK-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: [[QV:%.*]] = load i32, ptr [[Q:%.*]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]]) ; CHECK-NEXT: store i32 [[PV]], ptr [[P]], align 4 ; CHECK-NEXT: ret void ; entry: %p = alloca i64 - call void @llvm.lifetime.start.p0(i64 8, ptr %p) + call void @llvm.lifetime.start.p0(ptr %p) %pv = load i32, ptr %p %qv = load i32, ptr %q - call void @llvm.lifetime.end.p0(i64 8, ptr %p) - call void @llvm.lifetime.start.p0(i64 8, ptr %p) + call void @llvm.lifetime.end.p0(ptr %p) + call void @llvm.lifetime.start.p0(ptr %p) store i32 %pv, ptr %p store i32 %qv, ptr %q ret void } -declare void @llvm.lifetime.end.p0(i64, ptr) -declare void @llvm.lifetime.start.p0(i64, ptr) +declare void @llvm.lifetime.end.p0(ptr) +declare void @llvm.lifetime.start.p0(ptr) diff --git a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll index f7e21cd..736b072 100644 --- a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll +++ b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll @@ -25,11 +25,11 @@ define void @defn_simple(...) { ; OPT-LABEL: define {{[^@]+}}@defn_simple(...) 
{ ; OPT-NEXT: entry: ; OPT-NEXT: %va_start = alloca ptr, align 4 -; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start) ; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start) ; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4 ; OPT-NEXT: call void @defn_simple.valist(ptr %0) -; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start) ; OPT-NEXT: ret void ; ; ABI-LABEL: define {{[^@]+}}@defn_simple(ptr %varargs) { @@ -50,11 +50,11 @@ define private void @defn_private_simple(...) { ; OPT-LABEL: define {{[^@]+}}@defn_private_simple(...) { ; OPT-NEXT: entry: ; OPT-NEXT: %va_start = alloca ptr, align 4 -; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start) ; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start) ; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4 ; OPT-NEXT: call void @defn_private_simple.valist(ptr %0) -; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start) ; OPT-NEXT: ret void ; ; ABI-LABEL: define {{[^@]+}}@defn_private_simple(ptr %varargs) { @@ -75,11 +75,11 @@ define internal void @defn_internal_simple(...) { ; OPT-LABEL: define {{[^@]+}}@defn_internal_simple(...) { ; OPT-NEXT: entry: ; OPT-NEXT: %va_start = alloca ptr, align 4 -; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start) ; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start) ; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4 ; OPT-NEXT: call void @defn_internal_simple.valist(ptr %0) -; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start) ; OPT-NEXT: ret void ; ; ABI-LABEL: define {{[^@]+}}@defn_internal_simple(ptr %varargs) { @@ -211,11 +211,11 @@ define external void @defn_external_simple(...) { ; OPT-LABEL: define {{[^@]+}}@defn_external_simple(...) { ; OPT-NEXT: entry: ; OPT-NEXT: %va_start = alloca ptr, align 4 -; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start) ; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start) ; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4 ; OPT-NEXT: call void @defn_external_simple.valist(ptr %0) -; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start) ; OPT-NEXT: ret void ; ; ABI-LABEL: define {{[^@]+}}@defn_external_simple(ptr %varargs) { diff --git a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll index 96cc826..e21b72d 100644 --- a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll +++ b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll @@ -10,11 +10,11 @@ define i32 @variadic_int_double_get_firstz(...) { ; OPT-LABEL: define {{[^@]+}}@variadic_int_double_get_firstz(...) 
{ ; OPT-NEXT: entry: ; OPT-NEXT: %va_start = alloca ptr, align 4 -; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start) ; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start) ; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4 ; OPT-NEXT: %1 = call i32 @variadic_int_double_get_firstz.valist(ptr %0) -; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start) ; OPT-NEXT: ret i32 %1 ; ; ABI-LABEL: define {{[^@]+}}@variadic_int_double_get_firstz(ptr %varargs) { @@ -61,11 +61,11 @@ define double @variadic_int_double_get_secondz(...) { ; OPT-LABEL: define {{[^@]+}}@variadic_int_double_get_secondz(...) { ; OPT-NEXT: entry: ; OPT-NEXT: %va_start = alloca ptr, align 4 -; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start) ; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start) ; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4 ; OPT-NEXT: %1 = call double @variadic_int_double_get_secondz.valist(ptr %0) -; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start) +; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start) ; OPT-NEXT: ret double %1 ; ; ABI-LABEL: define {{[^@]+}}@variadic_int_double_get_secondz(ptr %varargs) { @@ -115,13 +115,13 @@ entry: ; CHECK-LABEL: @variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %variadic_can_get_firstIidEEbT_T0_.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store double %y, ptr %1, align 4 ; CHECK-NEXT: %call = call i32 @variadic_int_double_get_firstz.valist(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: %cmp.i = icmp eq i32 %call, %x ; CHECK-NEXT: ret i1 %cmp.i ; CHECK-NEXT: } @@ -130,26 +130,26 @@ define zeroext i1 @variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) { ; OPT-LABEL: define {{[^@]+}}@variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) { ; OPT-NEXT: entry: ; OPT-NEXT: %vararg_buffer = alloca %variadic_can_get_firstIidEEbT_T0_.vararg, align 16 -; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer) +; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; OPT-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0 ; OPT-NEXT: store i32 %x, ptr %0, align 4 ; OPT-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2 ; OPT-NEXT: store double %y, ptr %1, align 8 ; OPT-NEXT: %call = call i32 @variadic_int_double_get_firstz.valist(ptr %vararg_buffer) -; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer) +; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; OPT-NEXT: %cmp.i = icmp eq i32 %call, %x ; OPT-NEXT: ret i1 %cmp.i ; ; ABI-LABEL: define {{[^@]+}}@variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) { ; ABI-NEXT: entry: ; ABI-NEXT: %vararg_buffer = alloca 
%variadic_can_get_firstIidEEbT_T0_.vararg, align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; ABI-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0 ; ABI-NEXT: store i32 %x, ptr %0, align 4 ; ABI-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2 ; ABI-NEXT: store double %y, ptr %1, align 8 ; ABI-NEXT: %call = call i32 @variadic_int_double_get_firstz(ptr %vararg_buffer) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; ABI-NEXT: %cmp.i = icmp eq i32 %call, %x ; ABI-NEXT: ret i1 %cmp.i ; @@ -162,13 +162,13 @@ entry: ; CHECK-LABEL: @variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %variadic_can_get_secondIidEEbT_T0_.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store double %y, ptr %1, align 4 ; CHECK-NEXT: %call = call double @variadic_int_double_get_secondz.valist(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: %cmp.i = fcmp oeq double %call, %y ; CHECK-NEXT: ret i1 %cmp.i ; CHECK-NEXT: } @@ -177,26 +177,26 @@ define zeroext i1 @variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) { ; OPT-LABEL: define {{[^@]+}}@variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) { ; OPT-NEXT: entry: ; OPT-NEXT: %vararg_buffer = alloca %variadic_can_get_secondIidEEbT_T0_.vararg, align 16 -; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer) +; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; OPT-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0 ; OPT-NEXT: store i32 %x, ptr %0, align 4 ; OPT-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2 ; OPT-NEXT: store double %y, ptr %1, align 8 ; OPT-NEXT: %call = call double @variadic_int_double_get_secondz.valist(ptr %vararg_buffer) -; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer) +; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; OPT-NEXT: %cmp.i = fcmp oeq double %call, %y ; OPT-NEXT: ret i1 %cmp.i ; ; ABI-LABEL: define {{[^@]+}}@variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) { ; ABI-NEXT: entry: ; ABI-NEXT: %vararg_buffer = alloca %variadic_can_get_secondIidEEbT_T0_.vararg, align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; ABI-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0 ; ABI-NEXT: store i32 %x, ptr %0, align 4 ; ABI-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2 ; ABI-NEXT: store double %y, ptr %1, 
align 8 ; ABI-NEXT: %call = call double @variadic_int_double_get_secondz(ptr %vararg_buffer) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; ABI-NEXT: %cmp.i = fcmp oeq double %call, %y ; ABI-NEXT: ret i1 %cmp.i ; diff --git a/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll b/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll index b661f7f..0f178c7 100644 --- a/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll +++ b/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll @@ -19,11 +19,11 @@ define hidden void @fptr_single_i32(i32 noundef %x) { ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[FPTR_SINGLE_I32_VARARG:%.*]], align 16 ; ABI-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr @vararg_ptr, align 4 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[FPTR_SINGLE_I32_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP1]], align 4 ; ABI-NEXT: call void [[TMP0]](ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -45,11 +45,11 @@ define hidden void @fptr_libcS(ptr noundef byval(%struct.libcS) align 8 %x) { ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[FPTR_LIBCS_VARARG:%.*]], align 16 ; ABI-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr @vararg_ptr, align 4 ; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[INDIRECTALLOCA]], ptr [[X:%.*]], i64 24, i1 false) -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[FPTR_LIBCS_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4 ; ABI-NEXT: call void [[TMP0]](ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: diff --git a/llvm/test/Transforms/ExpandVariadics/intrinsics.ll b/llvm/test/Transforms/ExpandVariadics/intrinsics.ll index 1782c92..52ce80e 100644 --- a/llvm/test/Transforms/ExpandVariadics/intrinsics.ll +++ b/llvm/test/Transforms/ExpandVariadics/intrinsics.ll @@ -3,13 +3,13 @@ ; RUN: opt -mtriple=wasm32-unknown-unknown -S --passes=expand-variadics --expand-variadics-override=lowering < %s | FileCheck %s -check-prefixes=CHECK,ABI ; REQUIRES: webassembly-registered-target -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @llvm.va_copy.p0(ptr, ptr) declare void @valist(ptr noundef) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @llvm.va_start.p0(ptr) @@ -20,31 +20,31 @@ define void @start_once(...) 
{ ; OPT-LABEL: @start_once( ; OPT-NEXT: entry: ; OPT-NEXT: [[VA_START:%.*]] = alloca ptr, align 4 -; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VA_START]]) +; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr [[VA_START]]) ; OPT-NEXT: call void @llvm.va_start.p0(ptr [[VA_START]]) ; OPT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VA_START]], align 4 ; OPT-NEXT: call void @start_once.valist(ptr [[TMP0]]) -; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VA_START]]) +; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr [[VA_START]]) ; OPT-NEXT: ret void ; ; ABI-LABEL: @start_once( ; ABI-NEXT: entry: ; ABI-NEXT: [[S:%.*]] = alloca ptr, align 4 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[S]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S]]) ; ABI-NEXT: store ptr [[VARARGS:%.*]], ptr [[S]], align 4 ; ABI-NEXT: [[TMP0:%.*]] = load ptr, ptr [[S]], align 4 ; ABI-NEXT: call void @valist(ptr noundef [[TMP0]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[S]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S]]) ; ABI-NEXT: ret void ; entry: %s = alloca ptr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s) + call void @llvm.lifetime.start.p0(ptr nonnull %s) call void @llvm.va_start.p0(ptr nonnull %s) %0 = load ptr, ptr %s, align 4 call void @valist(ptr noundef %0) call void @llvm.va_end.p0(ptr %s) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s) + call void @llvm.lifetime.end.p0(ptr nonnull %s) ret void } @@ -53,34 +53,34 @@ define void @start_twice(...) { ; OPT-LABEL: @start_twice( ; OPT-NEXT: entry: ; OPT-NEXT: [[VA_START:%.*]] = alloca ptr, align 4 -; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VA_START]]) +; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr [[VA_START]]) ; OPT-NEXT: call void @llvm.va_start.p0(ptr [[VA_START]]) ; OPT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VA_START]], align 4 ; OPT-NEXT: call void @start_twice.valist(ptr [[TMP0]]) -; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VA_START]]) +; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr [[VA_START]]) ; OPT-NEXT: ret void ; ; ABI-LABEL: @start_twice( ; ABI-NEXT: entry: ; ABI-NEXT: [[S0:%.*]] = alloca ptr, align 4 ; ABI-NEXT: [[S1:%.*]] = alloca ptr, align 4 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[S0]]) -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[S1]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S0]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S1]]) ; ABI-NEXT: store ptr [[VARARGS:%.*]], ptr [[S0]], align 4 ; ABI-NEXT: [[TMP0:%.*]] = load ptr, ptr [[S0]], align 4 ; ABI-NEXT: call void @valist(ptr noundef [[TMP0]]) ; ABI-NEXT: store ptr [[VARARGS]], ptr [[S1]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = load ptr, ptr [[S1]], align 4 ; ABI-NEXT: call void @valist(ptr noundef [[TMP1]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[S1]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[S0]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S1]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S0]]) ; ABI-NEXT: ret void ; entry: %s0 = alloca ptr, align 4 %s1 = alloca ptr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s0) - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s1) + call void @llvm.lifetime.start.p0(ptr nonnull %s0) + call void @llvm.lifetime.start.p0(ptr nonnull %s1) call void @llvm.va_start.p0(ptr nonnull %s0) %0 = load ptr, ptr %s0, align 4 
call void @valist(ptr noundef %0) @@ -89,8 +89,8 @@ entry: %1 = load ptr, ptr %s1, align 4 call void @valist(ptr noundef %1) call void @llvm.va_end.p0(ptr %s1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s0) + call void @llvm.lifetime.end.p0(ptr nonnull %s1) + call void @llvm.lifetime.end.p0(ptr nonnull %s0) ret void } @@ -100,21 +100,21 @@ define void @copy(ptr noundef %va) { ; CHECK-NEXT: [[VA_ADDR:%.*]] = alloca ptr, align 4 ; CHECK-NEXT: [[CP:%.*]] = alloca ptr, align 4 ; CHECK-NEXT: store ptr [[VA:%.*]], ptr [[VA_ADDR]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[CP]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CP]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr [[CP]], ptr [[VA_ADDR]], i32 4, i1 false) ; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[CP]], align 4 ; CHECK-NEXT: call void @valist(ptr noundef [[TMP0]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[CP]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CP]]) ; CHECK-NEXT: ret void ; entry: %va.addr = alloca ptr, align 4 %cp = alloca ptr, align 4 store ptr %va, ptr %va.addr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cp) + call void @llvm.lifetime.start.p0(ptr nonnull %cp) call void @llvm.va_copy.p0(ptr nonnull %cp, ptr nonnull %va.addr) %0 = load ptr, ptr %cp, align 4 call void @valist(ptr noundef %0) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cp) + call void @llvm.lifetime.end.p0(ptr nonnull %cp) ret void } diff --git a/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll b/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll index a9f27f7..83b33b93 100644 --- a/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll +++ b/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll @@ -16,11 +16,11 @@ define void @pass_byval(ptr byval(i32) %b) { ; ABI-LABEL: @pass_byval( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_BYVAL_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[TMP0]], ptr [[B:%.*]], i64 4, i1 false) ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -41,13 +41,13 @@ define void @i32_libcS_byval(i32 %x, ptr noundef byval(%struct.libcS) align 8 %y ; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8 ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_LIBCS_BYVAL_VARARG:%.*]], align 16 ; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[INDIRECTALLOCA]], ptr [[Y:%.*]], i64 24, i1 false) -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 ; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) 
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -66,13 +66,13 @@ define void @libcS_i32_byval(ptr byval(%struct.libcS) align 8 %x, i32 %y) { ; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8 ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[LIBCS_I32_BYVAL_VARARG:%.*]], align 16 ; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[INDIRECTALLOCA]], ptr [[X:%.*]], i64 24, i1 false) -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP0]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 ; ABI-NEXT: store i32 [[Y:%.*]], ptr [[TMP1]], align 4 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -90,11 +90,11 @@ define void @pass_byref(ptr byref(i32) %b) { ; ABI-LABEL: @pass_byref( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_BYREF_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store ptr [[B:%.*]], ptr [[TMP0]], align 4 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -113,13 +113,13 @@ define void @i32_libcS_byref(i32 %x, ptr noundef byref(%struct.libcS) align 8 %y ; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8 ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_LIBCS_BYREF_VARARG:%.*]], align 16 ; ABI-NEXT: store ptr [[Y:%.*]], ptr [[INDIRECTALLOCA]], align 4 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 ; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -138,13 +138,13 @@ define void @libcS_i32_byref(ptr byref(%struct.libcS) align 8 %x, i32 %y) { ; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8 ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[LIBCS_I32_BYREF_VARARG:%.*]], align 16 ; ABI-NEXT: store ptr [[X:%.*]], ptr [[INDIRECTALLOCA]], align 4 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds 
nuw [[LIBCS_I32_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP0]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 ; ABI-NEXT: store i32 [[Y:%.*]], ptr [[TMP1]], align 4 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: diff --git a/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll b/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll index 67cb269..46e1904 100644 --- a/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll +++ b/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll @@ -19,13 +19,13 @@ define void @i32_libcS(i32 %x, %struct.libcS %y) { ; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8 ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_LIBCS_VARARG:%.*]], align 16 ; ABI-NEXT: store [[STRUCT_LIBCS]] [[Y:%.*]], ptr [[INDIRECTALLOCA]], align 8 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 ; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -44,13 +44,13 @@ define void @libcS_i32(%struct.libcS %x, i32 %y) { ; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8 ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[LIBCS_I32_VARARG:%.*]], align 16 ; ABI-NEXT: store [[STRUCT_LIBCS]] [[X:%.*]], ptr [[INDIRECTALLOCA]], align 8 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP0]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 ; ABI-NEXT: store i32 [[Y:%.*]], ptr [[TMP1]], align 4 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: diff --git a/llvm/test/Transforms/ExpandVariadics/pass-integers.ll b/llvm/test/Transforms/ExpandVariadics/pass-integers.ll index 7a0c004..cf52724 100644 --- a/llvm/test/Transforms/ExpandVariadics/pass-integers.ll +++ b/llvm/test/Transforms/ExpandVariadics/pass-integers.ll @@ -17,9 +17,9 @@ define void @pass_nothing() { ; ABI-LABEL: @pass_nothing( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_NOTHING_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[VARARG_BUFFER]]) 
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -36,11 +36,11 @@ define void @pass_s1(i8 %x) { ; ABI-LABEL: @pass_s1( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S1_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S1_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i8 [[X:%.*]], ptr [[TMP0]], align 1 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -57,11 +57,11 @@ define void @pass_s2(i16 %x) { ; ABI-LABEL: @pass_s2( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S2_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S2_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i16 [[X:%.*]], ptr [[TMP0]], align 2 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -78,11 +78,11 @@ define void @pass_s3(i32 %x) { ; ABI-LABEL: @pass_s3( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S3_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S3_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -99,11 +99,11 @@ define void @pass_s4(i64 %x) { ; ABI-LABEL: @pass_s4( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S4_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S4_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i64 [[X:%.*]], ptr [[TMP0]], align 8 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -120,11 +120,11 @@ define void @pass_s5(<4 x i32> %x) { ; ABI-LABEL: @pass_s5( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S5_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S5_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store <4 x i32> [[X:%.*]], ptr [[TMP0]], align 16 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: 
call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -141,13 +141,13 @@ define void @pass_int_s1(i32 %i, i8 %x) { ; ABI-LABEL: @pass_int_s1( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S1_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 5, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S1_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S1_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 ; ABI-NEXT: store i8 [[X:%.*]], ptr [[TMP1]], align 1 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 5, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -164,13 +164,13 @@ define void @pass_int_s2(i32 %i, i16 %x) { ; ABI-LABEL: @pass_int_s2( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S2_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 6, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S2_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S2_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 ; ABI-NEXT: store i16 [[X:%.*]], ptr [[TMP1]], align 2 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 6, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -187,13 +187,13 @@ define void @pass_int_s3(i32 %i, i32 %x) { ; ABI-LABEL: @pass_int_s3( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S3_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S3_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S3_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 ; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP1]], align 4 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -210,13 +210,13 @@ define void @pass_int_s4(i32 %i, i64 %x) { ; ABI-LABEL: @pass_int_s4( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S4_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S4_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S4_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2 ; ABI-NEXT: store i64 [[X:%.*]], ptr [[TMP1]], align 8 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void 
@llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -233,13 +233,13 @@ define void @pass_int_s5(i32 %i, <4 x i32> %x) { ; ABI-LABEL: @pass_int_s5( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S5_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S5_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S5_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2 ; ABI-NEXT: store <4 x i32> [[X:%.*]], ptr [[TMP1]], align 16 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -256,7 +256,7 @@ define void @pass_asc(i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32> %x5) { ; ABI-LABEL: @pass_asc( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_ASC_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 48, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_ASC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store i8 [[X1:%.*]], ptr [[TMP0]], align 1 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_ASC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2 @@ -268,7 +268,7 @@ define void @pass_asc(i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32> %x5) { ; ABI-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[PASS_ASC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 8 ; ABI-NEXT: store <4 x i32> [[X5:%.*]], ptr [[TMP4]], align 16 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 48, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -285,7 +285,7 @@ define void @pass_dsc(<4 x i32> %x0, i64 %x1, i32 %x2, i16 %x3, i8 %x4) { ; ABI-LABEL: @pass_dsc( ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_DSC_VARARG:%.*]], align 16 -; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 33, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_DSC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; ABI-NEXT: store <4 x i32> [[X0:%.*]], ptr [[TMP0]], align 16 ; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_DSC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 @@ -297,7 +297,7 @@ define void @pass_dsc(<4 x i32> %x0, i64 %x1, i32 %x2, i16 %x3, i8 %x4) { ; ABI-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[PASS_DSC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 5 ; ABI-NEXT: store i8 [[X4:%.*]], ptr [[TMP4]], align 1 ; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]]) -; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 33, ptr [[VARARG_BUFFER]]) +; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; ABI-NEXT: ret void ; entry: @@ -316,7 +316,7 @@ define void @pass_multiple(i32 %i, i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32> ; ABI-NEXT: entry: ; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_MULTIPLE_VARARG:%.*]], align 16 ; ABI-NEXT: 
; ABI-NEXT: [[VARARG_BUFFER1:%.*]] = alloca [[PASS_MULTIPLE_VARARG_0:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
@@ -324,8 +324,8 @@ define void @pass_multiple(i32 %i, i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32>
; ABI-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 3
; ABI-NEXT: store i64 [[X4:%.*]], ptr [[TMP2]], align 8
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VARARG_BUFFER1]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER1]])
; ABI-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG_0]], ptr [[VARARG_BUFFER1]], i32 0, i32 0
; ABI-NEXT: store i32 [[I]], ptr [[TMP3]], align 4
; ABI-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG_0]], ptr [[VARARG_BUFFER1]], i32 0, i32 1
@@ -335,7 +335,7 @@ define void @pass_multiple(i32 %i, i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32>
; ABI-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG_0]], ptr [[VARARG_BUFFER1]], i32 0, i32 5
; ABI-NEXT: store <4 x i32> [[X5:%.*]], ptr [[TMP6]], align 16
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER1]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VARARG_BUFFER1]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER1]])
; ABI-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/GVN/cond_br2.ll b/llvm/test/Transforms/GVN/cond_br2.ll
index 4202467..ff80328 100644
--- a/llvm/test/Transforms/GVN/cond_br2.ll
+++ b/llvm/test/Transforms/GVN/cond_br2.ll
@@ -17,7 +17,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
entry:
%sv = alloca %"class.llvm::SmallVector", align 16
- call void @llvm.lifetime.start.p0(i64 64, ptr %sv) #1
+ call void @llvm.lifetime.start.p0(ptr %sv) #1
%FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
store ptr %FirstEl.i.i.i.i.i.i, ptr %sv, align 16, !tbaa !4
%EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
@@ -86,7 +86,7 @@ if.then.i.i.i20: ; preds = %invoke.cont3
br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
- call void @llvm.lifetime.end.p0(i64 64, ptr %sv) #1
+ call void @llvm.lifetime.end.p0(ptr %sv) #1
ret void
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
@@ -105,14 +105,14 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @__gxx_personality_v0(...)
declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr) #2
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64) #2
diff --git a/llvm/test/Transforms/GVN/lifetime-simple.ll b/llvm/test/Transforms/GVN/lifetime-simple.ll
index 89ca127..bd35052 100644
--- a/llvm/test/Transforms/GVN/lifetime-simple.ll
+++ b/llvm/test/Transforms/GVN/lifetime-simple.ll
@@ -6,18 +6,18 @@ define i8 @test() nounwind {
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[P:%.*]] = alloca [32 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: store i8 1, ptr [[P]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[P]], align 1
; CHECK-NEXT: ret i8 [[TMP0]]
;
entry:
%P = alloca [32 x i8]
- call void @llvm.lifetime.start.p0(i64 32, ptr %P)
+ call void @llvm.lifetime.start.p0(ptr %P)
%0 = load i8, ptr %P
store i8 1, ptr %P
- call void @llvm.lifetime.end.p0(i64 32, ptr %P)
+ call void @llvm.lifetime.end.p0(ptr %P)
%1 = load i8, ptr %P
ret i8 %1
}
@@ -28,17 +28,17 @@ define void @assume_eq_arg(ptr %arg) {
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[ALLOCA]], [[ARG]]
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]])
; CHECK-NEXT: store volatile i32 0, ptr [[ALLOCA]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOCA]])
; CHECK-NEXT: ret void
;
%alloca = alloca i32
%cmp = icmp eq ptr %alloca, %arg
call void @llvm.assume(i1 %cmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
store volatile i32 0, ptr %alloca
- call void @llvm.lifetime.end.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
ret void
}
@@ -47,17 +47,17 @@ define void @assume_eq_null() {
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(1)
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[ALLOCA]], null
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: store volatile i32 0, ptr addrspace(1) null, align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: ret void
;
%alloca = alloca i32, addrspace(1)
%cmp = icmp eq ptr addrspace(1) %alloca, null
call void @llvm.assume(i1 %cmp)
- call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.start.p1(ptr addrspace(1) %alloca)
store volatile i32 0, ptr addrspace(1) %alloca
- call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.end.p1(ptr addrspace(1) %alloca)
ret void
}
@@ -67,9 +67,9 @@ define void @dom_eq_null() {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[ALLOCA]], null
; CHECK-NEXT: br i1 [[CMP]], label %[[IF:.*]], label %[[ELSE:.*]]
; CHECK: [[IF]]:
-; CHECK-NEXT: call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: store volatile i32 0, ptr addrspace(1) null, align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: ret void
; CHECK: [[ELSE]]:
; CHECK-NEXT: ret void
@@ -79,14 +79,14 @@ define void @dom_eq_null() {
br i1 %cmp, label %if, label %else
if:
- call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.start.p1(ptr addrspace(1) %alloca)
store volatile i32 0, ptr addrspace(1) %alloca
- call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.end.p1(ptr addrspace(1) %alloca)
ret void
else:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 %S, ptr nocapture %P) readonly
-declare void @llvm.lifetime.end.p0(i64 %S, ptr nocapture %P)
+declare void @llvm.lifetime.start.p0(ptr nocapture %P) readonly
+declare void @llvm.lifetime.end.p0(ptr nocapture %P)
diff --git a/llvm/test/Transforms/GVN/opt-remarks.ll b/llvm/test/Transforms/GVN/opt-remarks.ll
index 87cd54d..a5c3cb5c 100644
--- a/llvm/test/Transforms/GVN/opt-remarks.ll
+++ b/llvm/test/Transforms/GVN/opt-remarks.ll
@@ -109,9 +109,9 @@ entry:
define i8 @lifetime_end(i8 %val) {
%p = alloca [32 x i8]
- call void @llvm.lifetime.start.p0(i64 32, ptr %p)
+ call void @llvm.lifetime.start.p0(ptr %p)
store i8 %val, ptr %p
- call void @llvm.lifetime.end.p0(i64 32, ptr %p)
+ call void @llvm.lifetime.end.p0(ptr %p)
%1 = load i8, ptr %p
ret i8 %1
}
diff --git a/llvm/test/Transforms/GVN/vscale.ll b/llvm/test/Transforms/GVN/vscale.ll
index 5d6c559..b358df5 100644
--- a/llvm/test/Transforms/GVN/vscale.ll
+++ b/llvm/test/Transforms/GVN/vscale.ll
@@ -696,7 +696,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; MDEP-LABEL: @bigexample(
; MDEP-NEXT: entry:
; MDEP-NEXT: [[REF_TMP:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
-; MDEP-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MDEP-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[REF_TMP]])
; MDEP-NEXT: [[A_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[A:%.*]], 0
; MDEP-NEXT: store <vscale x 4 x i32> [[A_ELT]], ptr [[REF_TMP]], align 16
; MDEP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
@@ -720,13 +720,13 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; MDEP-NEXT: [[TMP9:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP7]], <vscale x 16 x i8> [[TMP8]], 2
; MDEP-NEXT: [[TMP10:%.*]] = bitcast <vscale x 4 x i32> [[A_ELT6]] to <vscale x 16 x i8>
; MDEP-NEXT: [[TMP11:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP9]], <vscale x 16 x i8> [[TMP10]], 3
-; MDEP-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MDEP-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[REF_TMP]])
; MDEP-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP11]]
;
; MSSA-LABEL: @bigexample(
; MSSA-NEXT: entry:
; MSSA-NEXT: [[REF_TMP:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
-; MSSA-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MSSA-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[REF_TMP]])
; MSSA-NEXT: [[A_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[A:%.*]], 0
; MSSA-NEXT: store <vscale x 4 x i32> [[A_ELT]], ptr [[REF_TMP]], align 16
; MSSA-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
@@ -750,12 +750,12 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; MSSA-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP5]], <vscale x 16 x i8> [[DOTUNPACK10]], 2
; MSSA-NEXT: [[DOTUNPACK12:%.*]] = load <vscale x 16 x i8>, ptr [[REF_TMP_REPACK5]], align 16
; MSSA-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP6]], <vscale x 16 x i8> [[DOTUNPACK12]], 3
-; MSSA-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MSSA-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[REF_TMP]])
; MSSA-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP7]]
;
entry:
%ref.tmp = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
- call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %ref.tmp)
%a.elt = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %a, 0
store <vscale x 4 x i32> %a.elt, ptr %ref.tmp, align 16
%0 = call i64 @llvm.vscale.i64()
@@ -790,7 +790,7 @@ entry:
%.elt11 = getelementptr inbounds i8, ptr %ref.tmp, i64 %14
%.unpack12 = load <vscale x 16 x i8>, ptr %.elt11, align 16
%15 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %12, <vscale x 16 x i8> %.unpack12, 3
- call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %ref.tmp)
ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %15
}
diff --git a/llvm/test/Transforms/GVNHoist/pr29034.ll b/llvm/test/Transforms/GVNHoist/pr29034.ll
index f5378ea..a5294c5 100644
--- a/llvm/test/Transforms/GVNHoist/pr29034.ll
+++ b/llvm/test/Transforms/GVNHoist/pr29034.ll
@@ -37,7 +37,7 @@ define void @music_task(ptr nocapture readnone %p) local_unnamed_addr {
entry:
%mapi = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %mapi)
+ call void @llvm.lifetime.start.p0(ptr %mapi)
store ptr null, ptr %mapi, align 8, !tbaa !1
%call = call i32 @music_decoder_init(ptr nonnull %mapi)
br label %while.cond
@@ -99,7 +99,7 @@ while.cond2.backedge: ; preds = %sw.default, %sw.bb1
br label %while.cond2
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i32 @music_decoder_init(ptr)
declare i32 @music_play_api(ptr, i32, i32, i32, ptr)
declare i32 @printf(ptr nocapture readonly, ...)
diff --git a/llvm/test/Transforms/GVNSink/lifetime.ll b/llvm/test/Transforms/GVNSink/lifetime.ll
index 1a8a69b..f8731e5 100644
--- a/llvm/test/Transforms/GVNSink/lifetime.ll
+++ b/llvm/test/Transforms/GVNSink/lifetime.ll
@@ -9,34 +9,34 @@ define void @test_cant_sink(i1 %c) {
; CHECK-SAME: i1 [[C:%.*]]) {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[B:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]]
; CHECK: [[IF]]:
; CHECK-NEXT: store i64 1, ptr [[A]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: br label %[[JOIN:.*]]
; CHECK: [[ELSE]]:
; CHECK-NEXT: store i64 1, ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: br label %[[JOIN]]
; CHECK: [[JOIN]]:
; CHECK-NEXT: ret void
;
%a = alloca i8
%b = alloca i8
- call void @llvm.lifetime.start(i64 1, ptr %a)
- call void @llvm.lifetime.start(i64 1, ptr %b)
+ call void @llvm.lifetime.start(ptr %a)
+ call void @llvm.lifetime.start(ptr %b)
br i1 %c, label %if, label %else
if:
store i64 1, ptr %a
- call void @llvm.lifetime.end(i64 1, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
br label %join
else:
store i64 1, ptr %b
- call void @llvm.lifetime.end(i64 1, ptr %b)
+ call void @llvm.lifetime.end(ptr %b)
br label %join
join:
@@ -47,7 +47,7 @@ define void @test_can_sink(i1 %c) {
; CHECK-LABEL: define void @test_can_sink(
; CHECK-SAME: i1 [[C:%.*]]) {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]]
; CHECK: [[IF]]:
; CHECK-NEXT: br label %[[JOIN:.*]]
@@ -55,21 +55,21 @@ define void @test_can_sink(i1 %c) {
; CHECK-NEXT: br label %[[JOIN]]
; CHECK: [[JOIN]]:
; CHECK-NEXT: store i64 1, ptr [[A]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i8
- call void @llvm.lifetime.start(i64 1, ptr %a)
+ call void @llvm.lifetime.start(ptr %a)
br i1 %c, label %if, label %else
if:
store i64 1, ptr %a
- call void @llvm.lifetime.end(i64 1, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
br label %join
else:
store i64 1, ptr %a
- call void @llvm.lifetime.end(i64 1, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
br label %join
join:
diff --git a/llvm/test/Transforms/GlobalOpt/dead-store-status.ll b/llvm/test/Transforms/GlobalOpt/dead-store-status.ll
index 9a8fbb8..7cb3a96 100644
--- a/llvm/test/Transforms/GlobalOpt/dead-store-status.ll
+++ b/llvm/test/Transforms/GlobalOpt/dead-store-status.ll
@@ -24,17 +24,17 @@ entry:
define i16 @bar() local_unnamed_addr #1 {
entry:
%local2 = alloca [1 x i16], align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %local2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %local2)
store ptr %local2, ptr @global, align 1
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %local2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %local2)
ret i16 undef
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
attributes #0 = { nofree noinline norecurse nounwind writeonly }
attributes #1 = { noinline nounwind writeonly }
diff --git a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll
index e5bab0c..28782d5 100644
--- a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll
+++ b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll
@@ -1,8 +1,8 @@
; RUN: opt -S -passes=hotcoldsplit -hotcoldsplit-threshold=0 < %s 2>&1 | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @use(ptr)
@@ -18,17 +18,17 @@ entry:
normalPath:
; These two uses of stack slots are non-overlapping. Based on this alone,
; the stack slots could be merged.
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
call void @use(ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @use(ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local2)
ret void
; CHECK-LABEL: codeRepl:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local1)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local2)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local1)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local2)
; CHECK-NEXT: call i1 @foo.cold.1(ptr %local1, ptr %local2)
; CHECK-NEXT: br i1
@@ -36,19 +36,19 @@ outlinedPath:
; These two uses of stack slots are overlapping. This should prevent
; merging of stack slots. CodeExtractor must replicate the effects of
; these markers in the caller to inhibit stack coloring.
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.start.p0(ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @cold_use2(ptr %local1, ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local2)
br i1 undef, label %outlinedPath2, label %outlinedPathExit
outlinedPath2:
; These extra lifetime markers are used to test that we emit only one
; pair of guard markers in the caller per memory object.
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @use(ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local2)
ret void
outlinedPathExit:
diff --git a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll
index e42db78..da7a9b8 100644
--- a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll
+++ b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=hotcoldsplit -hotcoldsplit-threshold=0 < %s 2>&1 | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @cold_use(ptr) cold
@@ -40,13 +40,13 @@ define void @only_lifetime_start_is_cold(i1 %arg) {
; CHECK-NEXT: [[LOCAL1:%.*]] = alloca i256, align 8
; CHECK-NEXT: br i1 [[ARG:%.*]], label [[CODEREPL:%.*]], label [[NO_EXTRACT1:%.*]]
; CHECK: codeRepl:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LOCAL1]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @only_lifetime_start_is_cold.cold.1(ptr [[LOCAL1]], i1 [[ARG]]) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[NO_EXTRACT1]], label [[EXIT:%.*]]
; CHECK: no-extract1:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[LOCAL1]])
; CHECK-NEXT: ret void
;
entry:
@@ -55,7 +55,7 @@ entry:
extract1:
; lt.start
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
call void @cold_use(ptr %local1)
br i1 %arg, label %extract2, label %no-extract1
@@ -67,7 +67,7 @@ no-extract1:
exit:
; lt.end
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
ret void
}
@@ -96,10 +96,10 @@ define void @only_lifetime_end_is_cold(i1 %arg) {
; CHECK-LABEL: @only_lifetime_end_is_cold(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOCAL1:%.*]] = alloca i256, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LOCAL1]])
; CHECK-NEXT: br i1 [[ARG:%.*]], label [[NO_EXTRACT1:%.*]], label [[CODEREPL:%.*]]
; CHECK: no-extract1:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[LOCAL1]])
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: codeRepl:
; CHECK-NEXT: call void @only_lifetime_end_is_cold.cold.1(ptr [[LOCAL1]]) #[[ATTR3]]
@@ -110,18 +110,18 @@ define void @only_lifetime_end_is_cold(i1 %arg) {
entry:
; lt.start
%local1 = alloca i256
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
br i1 %arg, label %no-extract1, label %extract1
no-extract1:
; lt.end
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
br label %exit
extract1:
; lt.end
call void @cold_use(ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
br label %exit
exit:
@@ -134,7 +134,7 @@ define void @do_not_lift_lifetime_end(i1 %arg) {
; CHECK-LABEL: @do_not_lift_lifetime_end(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOCAL1:%.*]] = alloca i256, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LOCAL1]])
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: header:
; CHECK-NEXT: call void @use(ptr [[LOCAL1]])
@@ -148,7 +148,7 @@ define void @do_not_lift_lifetime_end(i1 %arg) {
entry:
; lt.start
%local1 = alloca i256
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
br label %header
header:
@@ -167,7 +167,7 @@ extract2:
extract3:
; lt.end
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
br label %exit
exit:
diff --git a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll
index 26faaa3..b453c61 100644
--- a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll
+++ b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll
@@ -3,9 +3,9 @@
%type1 = type opaque
%type2 = type opaque
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @use(ptr, ptr)
@@ -23,16 +23,16 @@ normalPath:
ret void
; CHECK-LABEL: codeRepl:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local1)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local2)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local1)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local2)
; CHECK-NEXT: call void @foo.cold.1(ptr %local1, ptr %local2
outlinedPath:
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.start.p0(ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @use2(ptr %local1, ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local2)
br label %outlinedPathExit
outlinedPathExit:
diff --git a/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll b/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll
index df7cb3c..80249fc 100644
--- a/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll
+++ b/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll
@@ -6,8 +6,8 @@ target triple = "x86_64-apple-macosx10.14.0"
@c = common global i32 0, align 4
@h = common global i32 0, align 4
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
declare ptr @m()
@@ -27,15 +27,15 @@ bb:
bb3: ; preds = %bb
%i4 = call ptr @m()
- call void @llvm.lifetime.start.p0(i64 20, ptr %.sroa.4.i)
- call void @llvm.lifetime.start.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.lifetime.start.p0(ptr %.sroa.4.i)
+ call void @llvm.lifetime.start.p0(ptr %.sroa.5.i)
call void @llvm.memset.p0.i64(ptr align 2 %.sroa.4.i, i8 0, i64 20, i1 false)
call void @llvm.memset.p0.i64(ptr align 8 %.sroa.5.i, i8 0, i64 6, i1 false)
%i5 = load i32, ptr @c, align 4, !tbaa !4
%i6 = trunc i32 %i5 to i16
- call void @llvm.lifetime.end.p0(i64 20, ptr %.sroa.4.i)
- call void @llvm.lifetime.end.p0(i64 6, ptr %.sroa.5.i)
- call void @llvm.lifetime.start.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.lifetime.end.p0(ptr %.sroa.4.i)
+ call void @llvm.lifetime.end.p0(ptr %.sroa.5.i)
+ call void @llvm.lifetime.start.p0(ptr %.sroa.5.i)
call void @llvm.memset.p0.i64(ptr align 1 %.sroa.5.i, i8 3, i64 6, i1 false)
br label %bb7
@@ -47,7 +47,7 @@ bb7: ; preds = %bb7, %bb3
br i1 %i10, label %bb7, label %l.exit
l.exit: ; preds = %bb7
- call void @llvm.lifetime.end.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.lifetime.end.p0(ptr %.sroa.5.i)
br label %bb11
bb11: ; preds = %l.exit, %bb
diff --git a/llvm/test/Transforms/IRNormalizer/regression-convergence-tokens.ll b/llvm/test/Transforms/IRNormalizer/regression-convergence-tokens.ll
index 88eff97..0c2db4a 100644
--- a/llvm/test/Transforms/IRNormalizer/regression-convergence-tokens.ll
+++ b/llvm/test/Transforms/IRNormalizer/regression-convergence-tokens.ll
@@ -7,9 +7,9 @@ define i32 @nested(i32 %src) #0 {
; CHECK-SAME: i32 [[A0:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[BB15160:.*:]]
; CHECK-NEXT: [[T1:%.*]] = call token @llvm.experimental.convergence.entry()
-; CHECK-NEXT: %"vl77672llvm.experimental.convergence.anchor()" = call token @llvm.experimental.convergence.anchor()
-; CHECK-NEXT: %"op68297(vl77672)" = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[A0]]) [ "convergencectrl"(token %"vl77672llvm.experimental.convergence.anchor()") ]
-; CHECK-NEXT: ret i32 %"op68297(vl77672)"
+; CHECK-NEXT: %"vl14659llvm.experimental.convergence.anchor()" = call token @llvm.experimental.convergence.anchor()
+; CHECK-NEXT: %"op15516(vl14659)" = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[A0]]) [ "convergencectrl"(token %"vl14659llvm.experimental.convergence.anchor()") ]
+; CHECK-NEXT: ret i32 %"op15516(vl14659)"
;
%t1 = call token @llvm.experimental.convergence.entry()
%t2 = call token @llvm.experimental.convergence.anchor()
diff --git a/llvm/test/Transforms/IRNormalizer/regression-infinite-loop.ll b/llvm/test/Transforms/IRNormalizer/regression-infinite-loop.ll
index 35ac0fd..b9be105 100644
--- a/llvm/test/Transforms/IRNormalizer/regression-infinite-loop.ll
+++ b/llvm/test/Transforms/IRNormalizer/regression-infinite-loop.ll
@@ -8,18 +8,18 @@ define void @test(ptr, i32) {
; CHECK-NEXT: %"vl72693([[A1]], 1)" = add i32 [[A1]], 1
; CHECK-NEXT: br label %[[BB16110:.*]]
; CHECK: [[BB16110]]:
-; CHECK-NEXT: %"op10912(op18080, vl72693)" = phi i32 [ %"op18080(op10412, op17645)", %[[BB16110]] ], [ %"vl72693([[A1]], 1)", %[[BB76951]] ]
-; CHECK-NEXT: %"op10912(op17645, vl72693)" = phi i32 [ %"op17645(op10912)70", %[[BB16110]] ], [ %"vl72693([[A1]], 1)", %[[BB76951]] ]
-; CHECK-NEXT: %"op15084(op10912)" = mul i32 %"op10912(op18080, vl72693)", undef
-; CHECK-NEXT: %"op16562(op15084)" = xor i32 -1, %"op15084(op10912)"
-; CHECK-NEXT: %"op44627(op10912, op16562)" = add i32 %"op10912(op18080, vl72693)", %"op16562(op15084)"
-; CHECK-NEXT: %"op17645(op10912)" = add i32 -1, %"op10912(op17645, vl72693)"
-; CHECK-NEXT: %"op18080(op17645, op44627)" = add i32 %"op17645(op10912)", %"op44627(op10912, op16562)"
-; CHECK-NEXT: %"op17720(op15084, op18080)" = mul i32 %"op15084(op10912)", %"op18080(op17645, op44627)"
-; CHECK-NEXT: %"op16562(op17720)" = xor i32 -1, %"op17720(op15084, op18080)"
-; CHECK-NEXT: %"op17430(op16562, op18080)" = add i32 %"op16562(op17720)", %"op18080(op17645, op44627)"
%"op81283(op18080, vl72693)" = phi i32 [ %"op18080(op10412, op18131)", %[[BB16110]] ], [ %"vl72693([[A1]], 1)", %[[BB76951]] ] +; CHECK-NEXT: %"op81283(op18131, vl72693)" = phi i32 [ %"op18131(op81283)70", %[[BB16110]] ], [ %"vl72693([[A1]], 1)", %[[BB76951]] ] +; CHECK-NEXT: %"op13219(op81283)" = mul i32 %"op81283(op18080, vl72693)", undef +; CHECK-NEXT: %"op16562(op13219)" = xor i32 -1, %"op13219(op81283)" +; CHECK-NEXT: %"op12556(op16562, op81283)" = add i32 %"op16562(op13219)", %"op81283(op18080, vl72693)" +; CHECK-NEXT: %"op18131(op81283)" = add i32 -1, %"op81283(op18131, vl72693)" +; CHECK-NEXT: %"op18080(op12556, op18131)" = add i32 %"op12556(op16562, op81283)", %"op18131(op81283)" +; CHECK-NEXT: %"op17720(op13219, op18080)" = mul i32 %"op13219(op81283)", %"op18080(op12556, op18131)" +; CHECK-NEXT: %"op16562(op17720)" = xor i32 -1, %"op17720(op13219, op18080)" +; CHECK-NEXT: %"op17430(op16562, op18080)" = add i32 %"op16562(op17720)", %"op18080(op12556, op18131)" ; CHECK-NEXT: %"op10412(op17430)" = add i32 %"op17430(op16562, op18080)", undef -; CHECK-NEXT: %"op17720(op10412, op17720)" = mul i32 %"op10412(op17430)", %"op17720(op15084, op18080)" +; CHECK-NEXT: %"op17720(op10412, op17720)" = mul i32 %"op10412(op17430)", %"op17720(op13219, op18080)" ; CHECK-NEXT: %"op16562(op17720)1" = xor i32 -1, %"op17720(op10412, op17720)" ; CHECK-NEXT: %"op17430(op10412, op16562)" = add i32 %"op10412(op17430)", %"op16562(op17720)1" ; CHECK-NEXT: %"op10412(op17430)2" = add i32 %"op17430(op10412, op16562)", undef @@ -45,11 +45,11 @@ define void @test(ptr, i32) { ; CHECK-NEXT: %"op17720(op10412, op17720)21" = mul i32 %"op10412(op17430)20", %"op17720(op10412, op17720)17" ; CHECK-NEXT: %"op16562(op17720)22" = xor i32 -1, %"op17720(op10412, op17720)21" ; CHECK-NEXT: %"op17430(op10412, op16562)23" = add i32 %"op10412(op17430)20", %"op16562(op17720)22" -; CHECK-NEXT: %"op17645(op10912)24" = add i32 -9, %"op10912(op17645, vl72693)" -; CHECK-NEXT: %"op18080(op17430, op17645)" = add i32 %"op17430(op10412, op16562)23", %"op17645(op10912)24" -; CHECK-NEXT: %"op17720(op17720, op18080)" = mul i32 %"op17720(op10412, op17720)21", %"op18080(op17430, op17645)" +; CHECK-NEXT: %"op18131(op81283)24" = add i32 -9, %"op81283(op18131, vl72693)" +; CHECK-NEXT: %"op18080(op17430, op18131)" = add i32 %"op17430(op10412, op16562)23", %"op18131(op81283)24" +; CHECK-NEXT: %"op17720(op17720, op18080)" = mul i32 %"op17720(op10412, op17720)21", %"op18080(op17430, op18131)" ; CHECK-NEXT: %"op16562(op17720)25" = xor i32 -1, %"op17720(op17720, op18080)" -; CHECK-NEXT: %"op17430(op16562, op18080)26" = add i32 %"op16562(op17720)25", %"op18080(op17430, op17645)" +; CHECK-NEXT: %"op17430(op16562, op18080)26" = add i32 %"op16562(op17720)25", %"op18080(op17430, op18131)" ; CHECK-NEXT: %"op10412(op17430)27" = add i32 %"op17430(op16562, op18080)26", undef ; CHECK-NEXT: %"op17720(op10412, op17720)28" = mul i32 %"op10412(op17430)27", %"op17720(op17720, op18080)" ; CHECK-NEXT: %"op16562(op17720)29" = xor i32 -1, %"op17720(op10412, op17720)28" @@ -66,11 +66,11 @@ define void @test(ptr, i32) { ; CHECK-NEXT: %"op17720(op10412, op17720)40" = mul i32 %"op10412(op17430)39", %"op17720(op10412, op17720)36" ; CHECK-NEXT: %"op16562(op17720)41" = xor i32 -1, %"op17720(op10412, op17720)40" ; CHECK-NEXT: %"op17430(op10412, op16562)42" = add i32 %"op10412(op17430)39", %"op16562(op17720)41" -; CHECK-NEXT: %"op17645(op10912)43" = add i32 -14, %"op10912(op17645, vl72693)" -; CHECK-NEXT: %"op18080(op17430, op17645)44" = add i32 %"op17430(op10412, op16562)42", 
%"op17645(op10912)43" -; CHECK-NEXT: %"op17720(op17720, op18080)45" = mul i32 %"op17720(op10412, op17720)40", %"op18080(op17430, op17645)44" +; CHECK-NEXT: %"op18131(op81283)43" = add i32 -14, %"op81283(op18131, vl72693)" +; CHECK-NEXT: %"op18080(op17430, op18131)44" = add i32 %"op17430(op10412, op16562)42", %"op18131(op81283)43" +; CHECK-NEXT: %"op17720(op17720, op18080)45" = mul i32 %"op17720(op10412, op17720)40", %"op18080(op17430, op18131)44" ; CHECK-NEXT: %"op16562(op17720)46" = xor i32 -1, %"op17720(op17720, op18080)45" -; CHECK-NEXT: %"op17430(op16562, op18080)47" = add i32 %"op16562(op17720)46", %"op18080(op17430, op17645)44" +; CHECK-NEXT: %"op17430(op16562, op18080)47" = add i32 %"op16562(op17720)46", %"op18080(op17430, op18131)44" ; CHECK-NEXT: %"op10412(op17430)48" = add i32 %"op17430(op16562, op18080)47", undef ; CHECK-NEXT: %"op17720(op10412, op17720)49" = mul i32 %"op10412(op17430)48", %"op17720(op17720, op18080)45" ; CHECK-NEXT: %"op16562(op17720)50" = xor i32 -1, %"op17720(op10412, op17720)49" @@ -93,9 +93,9 @@ define void @test(ptr, i32) { ; CHECK-NEXT: %"op17430(op10412, op16562)67" = add i32 %"op10412(op17430)64", %"op16562(op17720)66" ; CHECK-NEXT: %"op10412(op17430)68" = add i32 %"op17430(op10412, op16562)67", undef ; CHECK-NEXT: %"op10412(op10412)69" = add i32 %"op10412(op17430)68", undef -; CHECK-NEXT: %"op17645(op10912)70" = add i32 -21, %"op10912(op17645, vl72693)" -; CHECK-NEXT: %"op18080(op10412, op17645)" = add i32 %"op10412(op10412)69", %"op17645(op10912)70" -; CHECK-NEXT: store i32 %"op18080(op10412, op17645)", ptr [[A0]], align 4 +; CHECK-NEXT: %"op18131(op81283)70" = add i32 -21, %"op81283(op18131, vl72693)" +; CHECK-NEXT: %"op18080(op10412, op18131)" = add i32 %"op10412(op10412)69", %"op18131(op81283)70" +; CHECK-NEXT: store i32 %"op18080(op10412, op18131)", ptr [[A0]], align 4 ; CHECK-NEXT: br label %[[BB16110]] ; bb: diff --git a/llvm/test/Transforms/IRNormalizer/reordering-basic.ll b/llvm/test/Transforms/IRNormalizer/reordering-basic.ll index fd09ce0..06e67e0 100644 --- a/llvm/test/Transforms/IRNormalizer/reordering-basic.ll +++ b/llvm/test/Transforms/IRNormalizer/reordering-basic.ll @@ -28,16 +28,16 @@ define double @baz(double %x) { ; CHECK-SAME: double [[A0:%.*]]) { ; CHECK-NEXT: [[BB76951:.*:]] ; CHECK-NEXT: [[IFCOND:%.*]] = fcmp one double [[A0]], 0.000000e+00 -; CHECK-NEXT: br i1 [[IFCOND]], label %[[BB91455:.*]], label %[[BB914551:.*]] -; CHECK: [[BB91455]]: -; CHECK-NEXT: %"vl15001bir()" = call double @bir() +; CHECK-NEXT: br i1 [[IFCOND]], label %[[BB47054:.*]], label %[[BB470541:.*]] +; CHECK: [[BB47054]]: +; CHECK-NEXT: %"vl16994bir()" = call double @bir() ; CHECK-NEXT: br label %[[BB17254:.*]] -; CHECK: [[BB914551]]: -; CHECK-NEXT: %"vl69719bar()" = call double @bar() +; CHECK: [[BB470541]]: +; CHECK-NEXT: %"vl88592bar()" = call double @bar() ; CHECK-NEXT: br label %[[BB17254]] ; CHECK: [[BB17254]]: -; CHECK-NEXT: %"op19734(vl15001, vl69719)" = phi double [ %"vl15001bir()", %[[BB91455]] ], [ %"vl69719bar()", %[[BB914551]] ] -; CHECK-NEXT: ret double %"op19734(vl15001, vl69719)" +; CHECK-NEXT: %"op16411(vl16994, vl88592)" = phi double [ %"vl16994bir()", %[[BB47054]] ], [ %"vl88592bar()", %[[BB470541]] ] +; CHECK-NEXT: ret double %"op16411(vl16994, vl88592)" ; entry: %ifcond = fcmp one double %x, 0.000000e+00 diff --git a/llvm/test/Transforms/IRNormalizer/reordering.ll b/llvm/test/Transforms/IRNormalizer/reordering.ll index 64abe8e..a3dbcb5 100644 --- a/llvm/test/Transforms/IRNormalizer/reordering.ll +++ 
+++ b/llvm/test/Transforms/IRNormalizer/reordering.ll
@@ -23,7 +23,7 @@ declare void @effecting()
; Place dead instruction(s) before the terminator
define void @call_effecting() {
; CHECK-LABEL: define void @call_effecting() {
-; CHECK-NEXT: bb15160:
+; CHECK-NEXT: bb14885:
; CHECK-NEXT: call void @effecting()
; CHECK-NEXT: [[TMP0:%.*]] = add i32 0, 1
; CHECK-NEXT: ret void
@@ -51,7 +51,7 @@ exit:
define void @dont_move_above_alloca() {
; CHECK-LABEL: define void @dont_move_above_alloca() {
-; CHECK-NEXT: bb15160:
+; CHECK-NEXT: bb14885:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @effecting()
; CHECK-NEXT: ret void
@@ -65,7 +65,7 @@ declare void @effecting1()
define void @dont_reorder_effecting() {
; CHECK-LABEL: define void @dont_reorder_effecting() {
-; CHECK-NEXT: bb10075:
+; CHECK-NEXT: bb45003:
; CHECK-NEXT: call void @effecting()
; CHECK-NEXT: call void @effecting1()
; CHECK-NEXT: ret void
@@ -79,7 +79,7 @@ declare void @effecting2(i32)
define void @dont_reorder_effecting1() {
; CHECK-LABEL: define void @dont_reorder_effecting1() {
-; CHECK-NEXT: bb10075:
+; CHECK-NEXT: bb45003:
; CHECK-NEXT: [[ONE:%.*]] = add i32 1, 1
; CHECK-NEXT: call void @effecting2(i32 [[ONE]])
; CHECK-NEXT: [[TWO:%.*]] = add i32 2, 2
diff --git a/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll b/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll
index a096e6d..73db71b 100644
--- a/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll
+++ b/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll
@@ -20,14 +20,14 @@ declare i32 @llvm.foo(i32, i32)
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I3_LOC:%.*]] = alloca i32, align 4, addrspace(5)
; CHECK-NEXT: [[I1_LOC:%.*]] = alloca i32, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 0, i32 1, ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: [[I1_RELOAD:%.*]] = load i32, ptr addrspace(5) [[I1_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 -1, ptr addrspace(5) [[I3_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[I3_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[I1_RELOAD]], i32 0, ptr addrspace(5) [[I3_LOC]])
; CHECK-NEXT: [[I3_RELOAD:%.*]] = load i32, ptr addrspace(5) [[I3_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 -1, ptr addrspace(5) [[I3_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[I3_LOC]])
; CHECK-NEXT: [[I4:%.*]] = tail call i32 @llvm.foo(i32 [[I3_RELOAD]], i32 0)
; CHECK-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/IROutliner/alloca-addrspace.ll b/llvm/test/Transforms/IROutliner/alloca-addrspace.ll
index e870150..ed76444 100644
--- a/llvm/test/Transforms/IROutliner/alloca-addrspace.ll
+++ b/llvm/test/Transforms/IROutliner/alloca-addrspace.ll
@@ -18,10 +18,10 @@ declare i32 @func(i32, i32)
; CHECK-LABEL: define {{[^@]+}}@outlineable() {
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I1_LOC:%.*]] = alloca i32, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 0, i32 1, ptr addrspace(5) [[I1_LOC]], i32 0)
; CHECK-NEXT: [[I1_RELOAD:%.*]] = load i32, ptr addrspace(5) [[I1_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[I1_RELOAD]], i32 0, ptr addrspace(5) null, i32 -1)
; CHECK-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/IROutliner/different-intrinsics.ll b/llvm/test/Transforms/IROutliner/different-intrinsics.ll
index 5fb22c3..f0e43bb 100644
--- a/llvm/test/Transforms/IROutliner/different-intrinsics.ll
+++ b/llvm/test/Transforms/IROutliner/different-intrinsics.ll
@@ -31,18 +31,18 @@ entry:
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
@@ -51,18 +51,18 @@ entry:
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll b/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll
index baf27ed..6730d1b 100644
--- a/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll
+++ b/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll
@@ -46,10 +46,10 @@ bb5:
; CHECK-LABEL: @f1(
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
@@ -61,10 +61,10 @@ bb5:
; CHECK-LABEL: @f2(
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 1)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll b/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
index 534efc3..53d52f5 100644
--- a/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
+++ b/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
@@ -49,13 +49,13 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
@@ -69,13 +69,13 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll b/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
index 3d3dbff..04ec9284f 100644
--- a/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
+++ b/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
@@ -42,19 +42,19 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: placeholder:
; CHECK-NEXT: [[A:%.*]] = sub i32 5, 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb3:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC1]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC1]], i32 1)
; CHECK-NEXT: [[PHINODE_CE_RELOAD2:%.*]] = load i32, ptr [[PHINODE_CE_LOC1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC1]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: placeholder1:
; CHECK-NEXT: [[B:%.*]] = add i32 5, 4
diff --git a/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll b/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
index cd60f93..0e82217 100644
--- a/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
+++ b/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
@@ -72,10 +72,10 @@ bb5:
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = sub i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr [[F_CE_LOC]], i32 0)
; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = add i32 [[TMP0]], [[TMP1]]
diff --git a/llvm/test/Transforms/IROutliner/extraction.ll b/llvm/test/Transforms/IROutliner/extraction.ll
index 1eca4ea..77f904d 100644
--- a/llvm/test/Transforms/IROutliner/extraction.ll
+++ b/llvm/test/Transforms/IROutliner/extraction.ll
@@ -59,13 +59,13 @@ define void @extract_outs1() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
@@ -99,13 +99,13 @@ define void @extract_outs2() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll b/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll
index 1184b4a..54f013c 100644
--- a/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll
+++ b/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll
@@ -44,10 +44,10 @@ next:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], ptr null, i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
@@ -61,13 +61,13 @@ next:
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[C_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[C_LOC]], ptr [[E_LOC]], i32 1)
; CHECK-NEXT: [[C_RELOAD:%.*]] = load i32, ptr [[C_LOC]], align 4
; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[C_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[C_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll b/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll
index 951466c..0c899dc 100644
--- a/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll
+++ b/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll
@@ -8,10 +8,10 @@ define i32 @r() {
; CHECK-LABEL: define i32 @r() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTLOC:%.*]] = alloca ptr, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[DOTLOC]], i32 0)
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load ptr, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[IF_END8:%.*]], label [[ENTRY_AFTER_OUTLINE:%.*]]
; CHECK: entry_after_outline:
; CHECK-NEXT: [[CALL7:%.*]] = call i32 [[DOTRELOAD]]()
@@ -91,10 +91,10 @@ define i32 @w() !dbg !8 {
; CHECK-SAME: ) !dbg [[DBG8:![0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RETVAL_1_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RETVAL_1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RETVAL_1_CE_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[RETVAL_1_CE_LOC]], i32 1), !dbg [[DBG11:![0-9]+]]
; CHECK-NEXT: [[RETVAL_1_CE_RELOAD:%.*]] = load i32, ptr [[RETVAL_1_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RETVAL_1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RETVAL_1_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[CLEANUP10:%.*]], label [[ENTRY_AFTER_OUTLINE:%.*]]
; CHECK: entry_after_outline:
; CHECK-NEXT: [[CALL8:%.*]] = call i32 @llvm.bswap.i32(i32 0)
diff --git a/llvm/test/Transforms/IROutliner/illegal-assumes.ll b/llvm/test/Transforms/IROutliner/illegal-assumes.ll
index d863fe7..c0c4e1a 100644
--- a/llvm/test/Transforms/IROutliner/illegal-assumes.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-assumes.ll
@@ -12,10 +12,10 @@ define void @outline_assumes() {
[[B:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]]) ; CHECK-NEXT: call void @outlined_ir_func_4(i1 true, ptr [[D]], ptr [[DL_LOC]]) ; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]]) ; CHECK-NEXT: [[SPLIT_INST:%.*]] = sub i1 [[DL_RELOAD]], [[DL_RELOAD]] ; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]]) @@ -48,10 +48,10 @@ define void @outline_assumes2() { ; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]]) ; CHECK-NEXT: call void @outlined_ir_func_4(i1 false, ptr [[D]], ptr [[DL_LOC]]) ; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]]) ; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]]) ; CHECK-NEXT: call void @outlined_ir_func_2(ptr [[A]], ptr [[B]], ptr [[C]]) @@ -82,10 +82,10 @@ define void @outline_assumes3() { ; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]]) ; CHECK-NEXT: call void @outlined_ir_func_0(i1 true, ptr [[D]], ptr [[A]], ptr [[B]], ptr [[C]], ptr [[DL_LOC]]) ; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]]) ; CHECK-NEXT: call void @outlined_ir_func_3(ptr [[A]]) ; CHECK-NEXT: ret void @@ -115,10 +115,10 @@ define void @outline_assumes4() { ; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]]) ; CHECK-NEXT: call void @outlined_ir_func_0(i1 false, ptr [[D]], ptr [[A]], ptr [[B]], ptr [[C]], ptr [[DL_LOC]]) ; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]]) ; CHECK-NEXT: call void @outlined_ir_func_3(ptr [[A]]) ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/IROutliner/illegal-memcpy.ll b/llvm/test/Transforms/IROutliner/illegal-memcpy.ll index 20e009a..523fd23 100644 --- a/llvm/test/Transforms/IROutliner/illegal-memcpy.ll +++ b/llvm/test/Transforms/IROutliner/illegal-memcpy.ll @@ -12,18 +12,18 @@ define i8 @function1(ptr noalias %s, ptr noalias %d, i64 %len) { ; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1 ; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, 
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
@@ -41,18 +41,18 @@ define i8 @function2(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
diff --git a/llvm/test/Transforms/IROutliner/illegal-memmove.ll b/llvm/test/Transforms/IROutliner/illegal-memmove.ll
index 06480c8..7482405 100644
--- a/llvm/test/Transforms/IROutliner/illegal-memmove.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-memmove.ll
@@ -12,18 +12,18 @@ define i8 @function1(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
@@ -41,18 +41,18 @@ define i8 @function2(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
diff --git a/llvm/test/Transforms/IROutliner/illegal-vaarg.ll b/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
index 38dfd25..15f9aa2 100644
--- a/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
@@ -21,10 +21,10 @@ define i32 @func1(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[AP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
entry:
@@ -56,10 +56,10 @@ define i32 @func2(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[AP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
entry:
diff --git a/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll b/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll
index 24ad86f..f9d4999 100644
--- a/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll
+++ b/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll
@@ -47,10 +47,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll b/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll
index 6b50e99..7191c80 100644
--- a/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll
+++ b/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll
@@ -38,10 +38,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll b/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll
index ab1836f..9085e7e 100644
--- a/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll
+++ b/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll
@@ -48,16 +48,16 @@ next:
; CHECK-NEXT: [[D_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[E_LOC]], ptr [[D_LOC]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
; CHECK-NEXT: [[D_RELOAD:%.*]] = load i32, ptr [[D_LOC]], align 4
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
@@ -72,13 +72,13 @@ next:
; CHECK-NEXT: [[D_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[D_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[E_LOC]], ptr [[D_LOC]], ptr null, i32 1)
; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
; CHECK-NEXT: [[D_RELOAD:%.*]] = load i32, ptr [[D_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[D_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll b/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll
index 32973ea..3229f42 100644
--- a/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll
+++ b/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll
@@ -57,10 +57,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[E_RELOAD]], [[TEST1]] ], [ [[Y]], [[ENTRY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[TEST1]], label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
@@ -78,10 +78,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[Y]], [[ENTRY]] ], [ [[E_RELOAD]], [[TEST1]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[TEST1]], label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/no-external-block-entries.ll b/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
index 4426009..fb2c5e9 100644
--- a/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
+++ b/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
@@ -35,10 +35,10 @@ block_6:
; CHECK-LABEL: @fn1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[B_CE_LOC]], i32 0)
; CHECK-NEXT: [[B_CE_RELOAD:%.*]] = load i32, ptr [[B_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_3:%.*]]
; CHECK: block_3:
; CHECK-NEXT: [[B:%.*]] = phi i32 [ [[B_CE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll b/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
index 77e3a82..9627274 100644
--- a/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
+++ b/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
@@ -33,10 +33,10 @@ block_6:
; CHECK-LABEL: @fn1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[B_CE_LOC]], i32 0)
; CHECK-NEXT: [[B_CE_RELOAD:%.*]] = load i32, ptr [[B_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_3:%.*]]
; CHECK: block_3:
; CHECK-NEXT: [[B:%.*]] = phi i32 [ [[B_CE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/outline-memcpy.ll b/llvm/test/Transforms/IROutliner/outline-memcpy.ll
index 0cf4f34..83fd5f6 100644
--- a/llvm/test/Transforms/IROutliner/outline-memcpy.ll
+++ b/llvm/test/Transforms/IROutliner/outline-memcpy.ll
@@ -27,20 +27,20 @@ entry:
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/outline-memmove.ll b/llvm/test/Transforms/IROutliner/outline-memmove.ll
index cf79244..c512cd4 100644
--- a/llvm/test/Transforms/IROutliner/outline-memmove.ll
+++ b/llvm/test/Transforms/IROutliner/outline-memmove.ll
@@ -27,20 +27,20 @@ entry:
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll b/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
index 2d52608..6a9cbca 100644
--- a/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
+++ b/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
@@ -53,10 +53,10 @@ entry:
; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
;
@@ -72,10 +72,10 @@ entry:
; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll b/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll
index 31f1d12..a8153a4 100644
--- a/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll
@@ -8,8 +8,8 @@
; Additionally, we check that the newly added bitcast instruction is excluded in
; further extractions.
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @outline_bitcast_base() {
entry:
@@ -38,11 +38,11 @@ entry:
%al = load i32, ptr %a
%bl = load i32, ptr %b
%cl = load i32, ptr %c
- call void @llvm.lifetime.start.p0(i64 -1, ptr %d)
+ call void @llvm.lifetime.start.p0(ptr %d)
%am = load i32, ptr %b
%bm = load i32, ptr %a
%cm = load i32, ptr %c
- call void @llvm.lifetime.end.p0(i64 -1, ptr %d)
+ call void @llvm.lifetime.end.p0(ptr %d)
ret void
}
@@ -61,8 +61,8 @@ entry:
%am = add i32 %a, %b
%bm = add i32 %b, %a
%cm = add i32 %b, %c
- call void @llvm.lifetime.start.p0(i64 -1, ptr %d)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %d)
+ call void @llvm.lifetime.start.p0(ptr %d)
+ call void @llvm.lifetime.end.p0(ptr %d)
ret void
}
@@ -114,13 +114,13 @@ entry:
; CHECK-LABEL: @outlined_ir_func_1(
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: [[D:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[D]])
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
; CHECK-NEXT: [[AL:%.*]] = add i32 [[TMP0:%.*]], [[TMP1:%.*]]
; CHECK-NEXT: [[BL:%.*]] = add i32 [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[CL:%.*]] = add i32 [[TMP1]], [[TMP2:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[D]])
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll b/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll
index 28c23e3..bb6bf8f 100644
--- a/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll
@@ -100,10 +100,10 @@ block_6:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: [[DIFF_CE_RELOAD:%.*]] = load i32, ptr [[DIFF_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_6:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
@@ -127,10 +127,10 @@ block_6:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: [[DIFF_CE_RELOAD:%.*]] = load i32, ptr [[DIFF_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_6:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/outlining-cost-model.ll b/llvm/test/Transforms/IROutliner/outlining-cost-model.ll
index 81bf4f0..bb3163a 100644
--- a/llvm/test/Transforms/IROutliner/outlining-cost-model.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-cost-model.ll
@@ -104,13 +104,13 @@ define void @function3() #0 {
; NOCOST-NEXT: [[B:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; NOCOST-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; NOCOST-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; NOCOST-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; NOCOST-NEXT: ret void
@@ -159,13 +159,13 @@ define void @function4() #0 {
; NOCOST-NEXT: [[B:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; NOCOST-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; NOCOST-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; NOCOST-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll b/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll
index 2e1fae3..64e87fc 100644
--- a/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll
@@ -14,13 +14,13 @@ define void @outline_outputs1() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]], i32 0)
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
@@ -53,13 +53,13 @@ define void @outline_outputs2() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[SUB_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[SUB_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[SUB_LOC]], ptr [[DOTLOC]], i32 1)
; CHECK-NEXT: [[SUB_RELOAD:%.*]] = load i32, ptr [[SUB_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[SUB_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[SUB_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[SUB_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll b/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll
index cb5d505..d901955 100644
--- a/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll
@@ -43,10 +43,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
@@ -59,10 +59,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll
index 463e097..9dbfa9e 100644
--- a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll
@@ -103,19 +103,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]], i32 0)
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_6:%.*]], label [[BLOCK_7:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AVAL_RELOAD]], [[BVAL_RELOAD]]
@@ -143,19 +143,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[ADD_LOC]], ptr [[MUL_LOC]], i32 1)
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[MUL_RELOAD:%.*]] = load i32, ptr [[MUL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
diff --git a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll
index 5293647..f789735 100644
--- a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll
@@ -124,19 +124,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[ADD_LOC]], ptr [[MUL_LOC]], i32 0)
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[MUL_RELOAD:%.*]] = load i32, ptr [[MUL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
diff --git a/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll b/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll
index 663e6d8..1de13eb 100644
--- a/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll
@@ -104,19 +104,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]])
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BLOCK_6:%.*]], label [[BLOCK_7:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AVAL_RELOAD]], [[BVAL_RELOAD]]
@@ -144,19 +144,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]])
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
diff --git a/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll b/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll
index 6d0b153..77f17c3 100644
--- a/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll
@@ -17,24 +17,24 @@ define void @outline_outputs1() #0 {
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT2:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 2, ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: br label [[NEXT:%.*]]
; CHECK: next:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD2_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD2_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC2]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[ADD_RELOAD]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[OUTPUT2]], ptr [[ADD2_LOC]], ptr [[DOTLOC2]])
; CHECK-NEXT: [[ADD2_RELOAD:%.*]] = load i32, ptr [[ADD2_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD3:%.*]] = load i32, ptr [[DOTLOC2]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD2_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD2_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC2]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD3]], i32 [[ADD2_RELOAD]], ptr [[RESULT2]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll b/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll
index 380c53d..cc4f6ef 100644
--- a/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll
@@ -14,13 +14,13 @@ define void @outline_outputs1() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
@@ -52,13 +52,13 @@ define void @outline_outputs2() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll b/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
index 4bbe6e7..15d313a 100644
--- a/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
+++ b/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
@@ -58,10 +58,10 @@ bb5:
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = add i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], ptr [[F_CE_LOC]], i32 0)
; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = sub i32 [[TMP0]], [[TMP1]]
@@ -77,10 +77,10 @@ bb5:
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = sub i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], ptr [[F_CE_LOC]], i32 1)
; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = add i32 [[TMP0]], [[TMP1]]
diff --git a/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll b/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll
index 9631bfa..9e443ab 100644
--- a/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll
+++ b/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll
@@ -43,10 +43,10 @@ next:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
@@ -59,10 +59,10 @@ next:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 1)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll b/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll
index 608abfa..02930d7 100644
--- a/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll
+++ b/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll
@@ -53,10 +53,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[E_RELOAD]], [[TEST1]] ], [ [[Y]], [[ENTRY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[FIRST:%.*]], label [[TEST1]]
; CHECK: first:
@@ -75,10 +75,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[Y]], [[ENTRY]] ], [ [[E_RELOAD]], [[TEST1]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[FIRST:%.*]], label [[TEST1]]
; CHECK: first:
diff --git a/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll b/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll
index f46035a..25b1e8e 100644
--- a/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll
+++ b/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll
@@ -53,10 +53,10 @@ next:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[C]], [[C]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
@@ -73,10 +73,10 @@ next:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = mul i32 [[C]], [[C]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
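Every hunk in this change applies the same mechanical rewrite: the leading size operand of the lifetime intrinsics is dropped, leaving only the pointer argument. A minimal standalone sketch of the migration (illustrative IR only; @before and @after are invented names, not functions from these tests, and the two forms would have to live in separate modules since the declarations differ):

; Old form: the first operand is the object size in bytes,
; with i64 -1 standing in for "unknown size".
declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
declare void @llvm.lifetime.end.p0(i64, ptr nocapture)

define void @before() {
  %p = alloca i32, align 4
  call void @llvm.lifetime.start.p0(i64 4, ptr %p)
  store i32 0, ptr %p, align 4
  call void @llvm.lifetime.end.p0(i64 4, ptr %p)
  ret void
}

; New form: no size operand; the marked extent is simply the
; whole alloca that the pointer refers to.
declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)

define void @after() {
  %p = alloca i32, align 4
  call void @llvm.lifetime.start.p0(ptr %p)
  store i32 0, ptr %p, align 4
  call void @llvm.lifetime.end.p0(ptr %p)
  ret void
}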
declare void @_Z3mixRjj(ptr dereferenceable(4), i32) -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define i32 @_Z3fooPKcjj(ptr nocapture readonly %s, i32 %len, i32 %c) { ; CHECK-LABEL: @_Z3fooPKcjj( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) ; CHECK-NEXT: store i32 -1640531527, ptr [[A]], align 4 ; CHECK-NEXT: [[CMP8:%.*]] = icmp ugt i32 [[LEN:%.*]], 11 ; CHECK-NEXT: br i1 [[CMP8]], label [[WHILE_BODY_LR_PH:%.*]], label [[WHILE_END:%.*]] @@ -40,12 +40,12 @@ define i32 @_Z3fooPKcjj(ptr nocapture readonly %s, i32 %len, i32 %c) { ; CHECK-NEXT: [[KEYLEN_0_LCSSA:%.*]] = phi i32 [ [[SUB_LCSSA]], [[WHILE_COND_WHILE_END_CRIT_EDGE]] ], [ [[LEN]], [[ENTRY:%.*]] ] ; CHECK-NEXT: call void @_Z3mixRjj(ptr dereferenceable(4) [[A]], i32 [[KEYLEN_0_LCSSA]]) ; CHECK-NEXT: [[T4:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret i32 [[T4]] ; entry: %a = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) store i32 -1640531527, ptr %a, align 4 %cmp8 = icmp ugt i32 %len, 11 br i1 %cmp8, label %while.body.lr.ph, label %while.end @@ -76,7 +76,7 @@ while.end: ; preds = %while.cond.while.en %keylen.0.lcssa = phi i32 [ %sub.lcssa, %while.cond.while.end_crit_edge ], [ %len, %entry ] call void @_Z3mixRjj(ptr dereferenceable(4) %a, i32 %keylen.0.lcssa) %t4 = load i32, ptr %a, align 4 - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret i32 %t4 } diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll index 053d073..4c04e6d 100644 --- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll +++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll @@ -4,17 +4,17 @@ define i32 @lifetime_flat_pointer() { ; CHECK-LABEL: define i32 @lifetime_flat_pointer() { ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[ALLOCA]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[ALLOCA]]) ; CHECK-NEXT: store i32 1, ptr addrspace(5) [[ALLOCA]], align 4 ; CHECK-NEXT: [[RET:%.*]] = load i32, ptr addrspace(5) [[ALLOCA]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[ALLOCA]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[ALLOCA]]) ; CHECK-NEXT: ret i32 [[RET]] ; %alloca = alloca i32, align 4, addrspace(5) %flat = addrspacecast ptr addrspace(5) %alloca to ptr - call void @llvm.lifetime.start(i64 4, ptr addrspace(5) %alloca) + call void @llvm.lifetime.start(ptr addrspace(5) %alloca) store i32 1, ptr %flat, align 4 %ret = load i32, ptr %flat, align 4 - call void @llvm.lifetime.end(i64 4, ptr addrspace(5) %alloca) + call void @llvm.lifetime.end(ptr addrspace(5) %alloca) ret i32 %ret } diff --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll index 31e914a..1a21416 100644 --- a/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll +++ 
b/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll @@ -7,20 +7,20 @@ define i32 @lifetime_flat_pointer() { ; CHECK-LABEL: define i32 @lifetime_flat_pointer() { ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[ALLOCA]] to ptr addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[ALLOCA]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]]) ; CHECK-NEXT: store i32 1, ptr addrspace(5) [[TMP1]], align 4 ; CHECK-NEXT: [[RET:%.*]] = load i32, ptr addrspace(5) [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[ALLOCA]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOCA]]) ; CHECK-NEXT: ret i32 [[RET]] ; %alloca = alloca i32, align 4 %1 = addrspacecast ptr %alloca to ptr addrspace(5) - call void @llvm.lifetime.start.p0(i64 4, ptr %alloca) + call void @llvm.lifetime.start.p0(ptr %alloca) store i32 1, ptr addrspace(5) %1, align 4 %ret = load i32, ptr addrspace(5) %1, align 4 - call void @llvm.lifetime.end.p0(i64 4, ptr %alloca) + call void @llvm.lifetime.end.p0(ptr %alloca) ret i32 %ret } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/Transforms/InferAlignment/propagate-from-other-load-stores.ll b/llvm/test/Transforms/InferAlignment/propagate-from-other-load-stores.ll new file mode 100644 index 0000000..3fc7c59 --- /dev/null +++ b/llvm/test/Transforms/InferAlignment/propagate-from-other-load-stores.ll @@ -0,0 +1,194 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s +%struct.S1 = type { %struct.float3, %struct.float3, i32, i32 } +%struct.float3 = type { float, float, float } + + +; ------------------------------------------------------------------------------ +; Test that we can propagate the align 16 to the load and store that are set to align 4 +; ------------------------------------------------------------------------------ + +define void @prop_align(ptr %v, ptr %vout) { +; CHECK-LABEL: define void @prop_align( +; CHECK-SAME: ptr [[V:%.*]], ptr [[VOUT:%.*]]) { +; CHECK-NEXT: [[DOTUNPACK_UNPACK:%.*]] = load float, ptr [[V]], align 16 +; CHECK-NEXT: [[DOTUNPACK_ELT7:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 4 +; CHECK-NEXT: [[DOTUNPACK_UNPACK8:%.*]] = load float, ptr [[DOTUNPACK_ELT7]], align 4 +; CHECK-NEXT: [[DOTUNPACK_ELT9:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 8 +; CHECK-NEXT: [[DOTUNPACK_UNPACK10:%.*]] = load float, ptr [[DOTUNPACK_ELT9]], align 8 +; CHECK-NEXT: [[DOTELT1:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 12 +; CHECK-NEXT: [[DOTUNPACK2_UNPACK:%.*]] = load float, ptr [[DOTELT1]], align 4 +; CHECK-NEXT: [[DOTUNPACK2_ELT12:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 16 +; CHECK-NEXT: [[DOTUNPACK2_UNPACK13:%.*]] = load float, ptr [[DOTUNPACK2_ELT12]], align 16 +; CHECK-NEXT: [[DOTUNPACK2_ELT14:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 20 +; CHECK-NEXT: [[DOTUNPACK2_UNPACK15:%.*]] = load float, ptr [[DOTUNPACK2_ELT14]], align 4 +; CHECK-NEXT: [[DOTELT3:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 24 +; CHECK-NEXT: [[DOTUNPACK4:%.*]] = load i32, ptr [[DOTELT3]], align 8 +; CHECK-NEXT: [[DOTELT5:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 28 +; CHECK-NEXT: [[DOTUNPACK6:%.*]] 
= load i32, ptr [[DOTELT5]], align 4 +; CHECK-NEXT: store float [[DOTUNPACK_UNPACK]], ptr [[VOUT]], align 16 +; CHECK-NEXT: [[VOUT_REPACK23:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 4 +; CHECK-NEXT: store float [[DOTUNPACK_UNPACK8]], ptr [[VOUT_REPACK23]], align 4 +; CHECK-NEXT: [[VOUT_REPACK25:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 8 +; CHECK-NEXT: store float [[DOTUNPACK_UNPACK10]], ptr [[VOUT_REPACK25]], align 8 +; CHECK-NEXT: [[VOUT_REPACK17:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 12 +; CHECK-NEXT: store float [[DOTUNPACK2_UNPACK]], ptr [[VOUT_REPACK17]], align 4 +; CHECK-NEXT: [[VOUT_REPACK17_REPACK27:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 16 +; CHECK-NEXT: store float [[DOTUNPACK2_UNPACK13]], ptr [[VOUT_REPACK17_REPACK27]], align 16 +; CHECK-NEXT: [[VOUT_REPACK17_REPACK29:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 20 +; CHECK-NEXT: store float [[DOTUNPACK2_UNPACK15]], ptr [[VOUT_REPACK17_REPACK29]], align 4 +; CHECK-NEXT: [[VOUT_REPACK19:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 24 +; CHECK-NEXT: store i32 [[DOTUNPACK4]], ptr [[VOUT_REPACK19]], align 8 +; CHECK-NEXT: [[VOUT_REPACK21:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 28 +; CHECK-NEXT: store i32 [[DOTUNPACK6]], ptr [[VOUT_REPACK21]], align 4 +; CHECK-NEXT: ret void +; + %.unpack.unpack = load float, ptr %v, align 16 + %.unpack.elt7 = getelementptr inbounds nuw i8, ptr %v, i64 4 + %.unpack.unpack8 = load float, ptr %.unpack.elt7, align 4 + %.unpack.elt9 = getelementptr inbounds nuw i8, ptr %v, i64 8 + %.unpack.unpack10 = load float, ptr %.unpack.elt9, align 8 + %.elt1 = getelementptr inbounds nuw i8, ptr %v, i64 12 + %.unpack2.unpack = load float, ptr %.elt1, align 4 + %.unpack2.elt12 = getelementptr inbounds nuw i8, ptr %v, i64 16 + %.unpack2.unpack13 = load float, ptr %.unpack2.elt12, align 4 + %.unpack2.elt14 = getelementptr inbounds nuw i8, ptr %v, i64 20 + %.unpack2.unpack15 = load float, ptr %.unpack2.elt14, align 4 + %.elt3 = getelementptr inbounds nuw i8, ptr %v, i64 24 + %.unpack4 = load i32, ptr %.elt3, align 8 + %.elt5 = getelementptr inbounds nuw i8, ptr %v, i64 28 + %.unpack6 = load i32, ptr %.elt5, align 4 + store float %.unpack.unpack, ptr %vout, align 16 + %vout.repack23 = getelementptr inbounds nuw i8, ptr %vout, i64 4 + store float %.unpack.unpack8, ptr %vout.repack23, align 4 + %vout.repack25 = getelementptr inbounds nuw i8, ptr %vout, i64 8 + store float %.unpack.unpack10, ptr %vout.repack25, align 8 + %vout.repack17 = getelementptr inbounds nuw i8, ptr %vout, i64 12 + store float %.unpack2.unpack, ptr %vout.repack17, align 4 + %vout.repack17.repack27 = getelementptr inbounds nuw i8, ptr %vout, i64 16 + store float %.unpack2.unpack13, ptr %vout.repack17.repack27, align 4 + %vout.repack17.repack29 = getelementptr inbounds nuw i8, ptr %vout, i64 20 + store float %.unpack2.unpack15, ptr %vout.repack17.repack29, align 4 + %vout.repack19 = getelementptr inbounds nuw i8, ptr %vout, i64 24 + store i32 %.unpack4, ptr %vout.repack19, align 8 + %vout.repack21 = getelementptr inbounds nuw i8, ptr %vout, i64 28 + store i32 %.unpack6, ptr %vout.repack21, align 4 + ret void +} + +; ------------------------------------------------------------------------------ +; Test that alignment is not propagated from a source that does not dominate the destination +; ------------------------------------------------------------------------------ + +define void @no_prop_align(ptr %v, ptr %vout, i1 %cond) { +; CHECK-LABEL: 
define void @no_prop_align( +; CHECK-SAME: ptr [[V:%.*]], ptr [[VOUT:%.*]], i1 [[COND:%.*]]) { +; CHECK-NEXT: br i1 [[COND]], label %[[BRANCH1:.*]], label %[[BRANCH2:.*]] +; CHECK: [[BRANCH1]]: +; CHECK-NEXT: [[DOTUNPACK_UNPACK:%.*]] = load float, ptr [[V]], align 16 +; CHECK-NEXT: [[DOTUNPACK_ELT7:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 4 +; CHECK-NEXT: [[DOTUNPACK_UNPACK8:%.*]] = load float, ptr [[DOTUNPACK_ELT7]], align 4 +; CHECK-NEXT: [[DOTUNPACK_ELT9:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 8 +; CHECK-NEXT: [[DOTUNPACK_UNPACK10:%.*]] = load float, ptr [[DOTUNPACK_ELT9]], align 8 +; CHECK-NEXT: [[DOTELT1:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 12 +; CHECK-NEXT: [[DOTUNPACK2_UNPACK:%.*]] = load float, ptr [[DOTELT1]], align 4 +; CHECK-NEXT: br label %[[END:.*]] +; CHECK: [[BRANCH2]]: +; CHECK-NEXT: [[DOTUNPACK2_ELT12:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 16 +; CHECK-NEXT: [[DOTUNPACK2_UNPACK13:%.*]] = load float, ptr [[DOTUNPACK2_ELT12]], align 4 +; CHECK-NEXT: [[DOTUNPACK2_ELT14:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 20 +; CHECK-NEXT: [[DOTUNPACK2_UNPACK15:%.*]] = load float, ptr [[DOTUNPACK2_ELT14]], align 4 +; CHECK-NEXT: [[DOTELT3:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 24 +; CHECK-NEXT: [[DOTUNPACK4:%.*]] = load i32, ptr [[DOTELT3]], align 8 +; CHECK-NEXT: [[DOTELT5:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 28 +; CHECK-NEXT: [[DOTUNPACK6:%.*]] = load i32, ptr [[DOTELT5]], align 4 +; CHECK-NEXT: br label %[[END]] +; CHECK: [[END]]: +; CHECK-NEXT: ret void +; + br i1 %cond, label %branch1, label %branch2 + +branch1: + %.unpack.unpack = load float, ptr %v, align 16 + %.unpack.elt7 = getelementptr inbounds nuw i8, ptr %v, i64 4 + %.unpack.unpack8 = load float, ptr %.unpack.elt7, align 4 + %.unpack.elt9 = getelementptr inbounds nuw i8, ptr %v, i64 8 + %.unpack.unpack10 = load float, ptr %.unpack.elt9, align 8 + %.elt1 = getelementptr inbounds nuw i8, ptr %v, i64 12 + %.unpack2.unpack = load float, ptr %.elt1, align 4 + br label %end + +branch2: + %.unpack2.elt12 = getelementptr inbounds nuw i8, ptr %v, i64 16 + %.unpack2.unpack13 = load float, ptr %.unpack2.elt12, align 4 + %.unpack2.elt14 = getelementptr inbounds nuw i8, ptr %v, i64 20 + %.unpack2.unpack15 = load float, ptr %.unpack2.elt14, align 4 + %.elt3 = getelementptr inbounds nuw i8, ptr %v, i64 24 + %.unpack4 = load i32, ptr %.elt3, align 8 + %.elt5 = getelementptr inbounds nuw i8, ptr %v, i64 28 + %.unpack6 = load i32, ptr %.elt5, align 4 + br label %end + +end: + ret void +} + +; ------------------------------------------------------------------------------ +; Test that we can propagate to/from negative offset GEPs +; ------------------------------------------------------------------------------ + +define void @prop_align_negative_offset(ptr %v) { +; CHECK-LABEL: define void @prop_align_negative_offset( +; CHECK-SAME: ptr [[V:%.*]]) { +; CHECK-NEXT: [[LOADALIGNED:%.*]] = load float, ptr [[V]], align 16 +; CHECK-NEXT: [[GEPNEGATIVE:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 -16 +; CHECK-NEXT: [[LOADUNALIGNED:%.*]] = load float, ptr [[GEPNEGATIVE]], align 16 +; CHECK-NEXT: ret void +; + %loadAligned= load float, ptr %v, align 16 + %gepNegative = getelementptr inbounds nuw i8, ptr %v, i64 -16 + %loadUnaligned = load float, ptr %gepNegative, align 4 + ret void +} + +define void @prop_align_negative_offset_2(ptr %v) { +; CHECK-LABEL: define void @prop_align_negative_offset_2( +; CHECK-SAME: ptr [[V:%.*]]) { +; CHECK-NEXT: 
[[GEPNEGATIVE:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 -16 +; CHECK-NEXT: [[LOADALIGNED:%.*]] = load float, ptr [[GEPNEGATIVE]], align 16 +; CHECK-NEXT: [[LOADUNALIGNED:%.*]] = load float, ptr [[V]], align 16 +; CHECK-NEXT: ret void +; + %gepNegative = getelementptr inbounds nuw i8, ptr %v, i64 -16 + %loadAligned = load float, ptr %gepNegative, align 16 + %loadUnaligned= load float, ptr %v, align 4 + ret void +} + +define void @prop_align_negative_offset_3(ptr %v) { +; CHECK-LABEL: define void @prop_align_negative_offset_3( +; CHECK-SAME: ptr [[V:%.*]]) { +; CHECK-NEXT: [[LOADALIGNED:%.*]] = load float, ptr [[V]], align 16 +; CHECK-NEXT: [[GEPNEGATIVE:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 -8 +; CHECK-NEXT: [[LOADUNALIGNED:%.*]] = load float, ptr [[GEPNEGATIVE]], align 8 +; CHECK-NEXT: ret void +; + %loadAligned= load float, ptr %v, align 16 + %gepNegative = getelementptr inbounds nuw i8, ptr %v, i64 -8 + %loadUnaligned = load float, ptr %gepNegative, align 4 + ret void +} + +define void @prop_align_negative_offset_4(ptr %v) { +; CHECK-LABEL: define void @prop_align_negative_offset_4( +; CHECK-SAME: ptr [[V:%.*]]) { +; CHECK-NEXT: [[LOADALIGNED:%.*]] = load float, ptr [[V]], align 16 +; CHECK-NEXT: [[GEPNEGATIVE:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 -20 +; CHECK-NEXT: [[LOADUNALIGNED:%.*]] = load float, ptr [[GEPNEGATIVE]], align 4 +; CHECK-NEXT: ret void +; + %loadAligned= load float, ptr %v, align 16 + %gepNegative = getelementptr inbounds nuw i8, ptr %v, i64 -20 + %loadUnaligned = load float, ptr %gepNegative, align 4 + ret void +} diff --git a/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll b/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll index c1375cb..54daf7c 100644 --- a/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll +++ b/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll @@ -14,8 +14,8 @@ entry: define i64 @foo() { ; CHECK-LABEL: @foo( -; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %{{.*}}) -; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %{{.*}}) +; CHECK: call void @llvm.lifetime.start.p0(ptr %{{.*}}) +; CHECK: call void @llvm.lifetime.end.p0(ptr %{{.*}}) entry: %a = alloca <vscale x 2 x i64>, align 16 store <vscale x 2 x i64> zeroinitializer, ptr %a, align 16 diff --git a/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll b/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll index bbce9e4..c3f6dd7 100644 --- a/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll +++ b/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll @@ -31,10 +31,10 @@ entry: await.ready: %StrayCoroSave = call token @llvm.coro.save(ptr null) %val = load i32, ptr %ref.tmp7 - call void @llvm.lifetime.start.p0(i64 4, ptr %testval) + call void @llvm.lifetime.start.p0(ptr %testval) %test = load i32, ptr %testval call void @print(i32 %test) - call void @llvm.lifetime.end.p0(i64 4, ptr %testval) + call void @llvm.lifetime.end.p0(ptr %testval) call void @print(i32 %val) br label %exit exit: @@ -54,5 +54,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 declare i1 @llvm.coro.end(ptr, i1) #3 -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4 -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4 +declare void @llvm.lifetime.start.p0(ptr nocapture) #4 +declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll 
b/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll index b5c4f42..71b463b 100644 --- a/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll +++ b/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll @@ -12,28 +12,28 @@ entry: declare i32 @baz(...) #0 -define i32 @bar() #1 { +define i32 @features_subset() #1 { entry: %call = call i32 @foo() ret i32 %call -; CHECK-LABEL: bar -; CHECK: call i32 @foo() +; CHECK-LABEL: features_subset +; CHECK: call i32 (...) @baz() } -define i32 @qux() #0 { +define i32 @features_equal() #0 { entry: %call = call i32 @foo() ret i32 %call -; CHECK-LABEL: qux +; CHECK-LABEL: features_equal ; CHECK: call i32 (...) @baz() } -define i32 @quux() #2 { +define i32 @features_different() #2 { entry: - %call = call i32 @bar() + %call = call i32 @foo() ret i32 %call -; CHECK-LABEL: quux -; CHECK: call i32 @bar() +; CHECK-LABEL: features_different +; CHECK: call i32 @foo() } diff --git a/llvm/test/Transforms/Inline/access-attributes-prop.ll b/llvm/test/Transforms/Inline/access-attributes-prop.ll index 28fa44e..fed2590 100644 --- a/llvm/test/Transforms/Inline/access-attributes-prop.ll +++ b/llvm/test/Transforms/Inline/access-attributes-prop.ll @@ -410,9 +410,9 @@ define void @prop_fn_decl_fail_alloca(ptr %p) { ; CHECK-LABEL: define {{[^@]+}}@prop_fn_decl_fail_alloca ; CHECK-SAME: (ptr [[P:%.*]]) { ; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]]) ; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]]) ; CHECK-NEXT: call void @bar1(ptr [[P]]) ; CHECK-NEXT: ret void ; @@ -425,9 +425,9 @@ define void @prop_cb_def_wr_fail_alloca(ptr %p) { ; CHECK-LABEL: define {{[^@]+}}@prop_cb_def_wr_fail_alloca ; CHECK-SAME: (ptr [[P:%.*]]) { ; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]]) ; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]]) ; CHECK-NEXT: call void @bar1(ptr [[P]]) ; CHECK-NEXT: ret void ; @@ -440,10 +440,10 @@ define void @prop_fn_decl_partially_okay_alloca(ptr %p) { ; CHECK-LABEL: define {{[^@]+}}@prop_fn_decl_partially_okay_alloca ; CHECK-SAME: (ptr [[P:%.*]]) { ; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]]) ; CHECK-NEXT: call void @bar1(ptr [[P]]) ; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]]) ; CHECK-NEXT: call void @bar1(ptr [[P]]) ; CHECK-NEXT: ret void ; @@ -456,10 +456,10 @@ define void @prop_cb_def_wr_partially_okay_alloca(ptr %p) { ; CHECK-LABEL: define {{[^@]+}}@prop_cb_def_wr_partially_okay_alloca ; CHECK-SAME: (ptr [[P:%.*]]) { ; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]]) ; CHECK-NEXT: call void @bar1(ptr [[P]]) ; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]]) +; CHECK-NEXT: call 
void @llvm.lifetime.end.p0(ptr [[A_I]]) ; CHECK-NEXT: call void @bar1(ptr [[P]]) ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/Inline/byval-align.ll b/llvm/test/Transforms/Inline/byval-align.ll index 0b135aa..a23f364 100644 --- a/llvm/test/Transforms/Inline/byval-align.ll +++ b/llvm/test/Transforms/Inline/byval-align.ll @@ -28,13 +28,13 @@ define void @byval_caller(ptr nocapture align 64 %a, ptr %b) #0 { ; CHECK-SAME: (ptr align 64 captures(none) [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[A1:%.*]] = alloca float, align 128 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A1]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 128 [[A1]], ptr align 128 [[A]], i64 4, i1 false) ; CHECK-NEXT: [[LOAD_I:%.*]] = load float, ptr [[A1]], align 4 ; CHECK-NEXT: [[B_IDX_I:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8 ; CHECK-NEXT: [[ADD_I:%.*]] = fadd float [[LOAD_I]], 2.000000e+00 ; CHECK-NEXT: store float [[ADD_I]], ptr [[B_IDX_I]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A1]]) ; CHECK-NEXT: [[CALLER_LOAD:%.*]] = load float, ptr [[B]], align 4 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7 ; CHECK-NEXT: store float [[CALLER_LOAD]], ptr [[ARRAYIDX]], align 4 diff --git a/llvm/test/Transforms/Inline/byval-tail-call.ll b/llvm/test/Transforms/Inline/byval-tail-call.ll index 808104c..f8fd4a6 100644 --- a/llvm/test/Transforms/Inline/byval-tail-call.ll +++ b/llvm/test/Transforms/Inline/byval-tail-call.ll @@ -22,11 +22,11 @@ define void @bar(ptr byval(i32) %x) { define void @foo(ptr %x) { ; CHECK-LABEL: @foo( ; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]]) ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1 ; CHECK-NEXT: store i32 [[TMP2]], ptr [[X1]], align 4 ; CHECK-NEXT: call void @ext(ptr nonnull [[X1]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]]) ; CHECK-NEXT: ret void ; call void @bar(ptr byval(i32) %x) @@ -42,12 +42,12 @@ define internal void @qux(ptr byval(i32) %x) { define void @frob(ptr %x) { ; CHECK-LABEL: @frob( ; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]]) ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1 ; CHECK-NEXT: store i32 [[TMP2]], ptr [[X1]], align 4 ; CHECK-NEXT: call void @ext(ptr nonnull [[X1]]) ; CHECK-NEXT: tail call void @ext(ptr null) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]]) ; CHECK-NEXT: ret void ; tail call void @qux(ptr byval(i32) %x) @@ -71,11 +71,11 @@ define void @bar2(ptr byval(i32) %x) { define void @foobar(ptr %x) { ; CHECK-LABEL: @foobar( ; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]]) ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1 ; CHECK-NEXT: store i32 [[TMP2]], ptr [[X1]], align 4 ; CHECK-NEXT: tail call void @ext2(ptr nonnull byval(i32) [[X1]]) -; CHECK-NEXT: 
call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]]) ; CHECK-NEXT: ret void ; tail call void @bar2(ptr byval(i32) %x) @@ -85,9 +85,9 @@ define void @foobar(ptr %x) { define void @barfoo() { ; CHECK-LABEL: @barfoo( ; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]]) ; CHECK-NEXT: tail call void @ext2(ptr nonnull byval(i32) [[X1]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]]) ; CHECK-NEXT: ret void ; %x = alloca i32 diff --git a/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll b/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll index 1d1cb45..e79ac66 100644 --- a/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll +++ b/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll @@ -26,11 +26,11 @@ define i64 @foo(ptr %arg) { ; CHECK-LABEL: define i64 @foo( ; CHECK-SAME: ptr [[ARG:%.*]]) { ; CHECK-NEXT: [[ARG1:%.*]] = alloca [[STRUCT:%.*]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[ARG1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARG1]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[ARG1]], ptr align 8 [[ARG]], i64 16, i1 false) ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [[STRUCT]], ptr [[ARG1]], i64 0, i32 1 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[ARG1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARG1]]) ; CHECK-NEXT: ret i64 0 ; %1 = call i64 @bar(ptr byval(%struct) align 8 %arg) diff --git a/llvm/test/Transforms/Inline/byval.ll b/llvm/test/Transforms/Inline/byval.ll index b4a19c5..c945d7f 100644 --- a/llvm/test/Transforms/Inline/byval.ll +++ b/llvm/test/Transforms/Inline/byval.ll @@ -35,12 +35,12 @@ define i32 @test1() nounwind { ; CHECK-NEXT: store i32 1, ptr [[TMP1]], align 8 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], ptr [[S]], i32 0, i32 1 ; CHECK-NEXT: store i64 2, ptr [[TMP4]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr [[S1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[S1]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[S1]], ptr [[S]], i64 12, i1 false) ; CHECK-NEXT: [[TMP1_I:%.*]] = load i32, ptr [[S1]], align 4 ; CHECK-NEXT: [[TMP2_I:%.*]] = add i32 [[TMP1_I]], 1 ; CHECK-NEXT: store i32 [[TMP2_I]], ptr [[S1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr [[S1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[S1]]) ; CHECK-NEXT: ret i32 0 ; entry: @@ -104,10 +104,10 @@ define void @test3() nounwind { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[S1:%.*]] = alloca [[STRUCT_SS:%.*]], align 64 ; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_SS]], align 1 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr [[S1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[S1]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 64 [[S1]], ptr align 64 [[S]], i64 12, i1 false) ; CHECK-NEXT: call void @g3(ptr align 64 [[S1]]) #[[ATTR0]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr [[S1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[S1]]) ; CHECK-NEXT: ret void ; entry: @@ -157,12 +157,12 @@ define i32 @test5() { ; CHECK-LABEL: define i32 @test5() { ; CHECK-NEXT: entry: ; 
CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_S0:%.*]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[B]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[B]], ptr align 4 @b, i64 4, i1 false) ; CHECK-NEXT: store i32 0, ptr @b, align 4 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[B]], align 4 ; CHECK-NEXT: store i32 [[TMP0]], ptr @a, align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[B]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]]) ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4 ; CHECK-NEXT: ret i32 [[TMP1]] ; diff --git a/llvm/test/Transforms/Inline/callbr.ll b/llvm/test/Transforms/Inline/callbr.ll index 1607700..57e92bb 100644 --- a/llvm/test/Transforms/Inline/callbr.ll +++ b/llvm/test/Transforms/Inline/callbr.ll @@ -10,8 +10,8 @@ define dso_local i32 @main() { ; CHECK-NEXT: [[I1_I:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4 ; CHECK-NEXT: store i32 0, ptr [[I]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I_I]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I1_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I1_I]]) ; CHECK-NEXT: store i32 0, ptr [[I1_I]], align 4 ; CHECK-NEXT: [[I2_I:%.*]] = load i32, ptr [[I1_I]], align 4 ; CHECK-NEXT: callbr void asm sideeffect "", "r,!i,!i,~{dirflag},~{fpsr},~{flags}"(i32 [[I2_I]]) @@ -27,8 +27,8 @@ define dso_local i32 @main() { ; CHECK-NEXT: br label [[T32_EXIT]] ; CHECK: t32.exit: ; CHECK-NEXT: [[I7_I:%.*]] = load i32, ptr [[I_I]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I_I]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I1_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I1_I]]) ; CHECK-NEXT: ret i32 [[I7_I]] ; bb: diff --git a/llvm/test/Transforms/Inline/devirtualize-4.ll b/llvm/test/Transforms/Inline/devirtualize-4.ll index d29360f..f96b5a9 100644 --- a/llvm/test/Transforms/Inline/devirtualize-4.ll +++ b/llvm/test/Transforms/Inline/devirtualize-4.ll @@ -48,14 +48,14 @@ define dso_local void @_Z4Testv() local_unnamed_addr { entry: %o = alloca %class.Impl, align 8 - call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %o) + call void @llvm.lifetime.start.p0(ptr nonnull %o) call void @_ZN4ImplC2Ev(ptr nonnull %o) call fastcc void @_ZL11IndirectRunR9Interface(ptr nonnull dereferenceable(8) %o) - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %o) + call void @llvm.lifetime.end.p0(ptr nonnull %o) ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) define linkonce_odr dso_local void @_ZN4ImplC2Ev(ptr %this) unnamed_addr align 2 { entry: @@ -74,7 +74,7 @@ entry: ret void } -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define linkonce_odr dso_local void @_ZN9InterfaceC2Ev(ptr %this) unnamed_addr align 2 { entry: @@ -85,10 +85,10 @@ entry: define linkonce_odr dso_local void @_ZN4Impl3RunEv(ptr %this) unnamed_addr align 2 { entry: %ref.tmp = alloca ptr, align 8 - call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %ref.tmp) + call void @llvm.lifetime.start.p0(ptr nonnull %ref.tmp) store ptr %this, ptr %ref.tmp, align 8 call void @_Z13DoNotOptimizeIP4ImplEvRKT_(ptr nonnull dereferenceable(8) %ref.tmp) - call void 
@llvm.lifetime.end.p0(i64 8, ptr nonnull %ref.tmp) + call void @llvm.lifetime.end.p0(ptr nonnull %ref.tmp) ret void } @@ -160,10 +160,10 @@ memptr.end: ; preds = %memptr.nonvirtual, define i32 @_Z2g1v() { entry: %a = alloca %struct.A, align 8 - call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %a) + call void @llvm.lifetime.start.p0(ptr nonnull %a) call void @_ZN1AC1Ev(ptr nonnull %a) %call = call i32 @_Z1fP1AMS_FivE(ptr nonnull %a, i64 1, i64 0) - call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %a) + call void @llvm.lifetime.end.p0(ptr nonnull %a) ret i32 %call } @@ -176,10 +176,10 @@ entry: define i32 @_Z2g2v() { entry: %a = alloca %struct.A, align 8 - call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %a) + call void @llvm.lifetime.start.p0(ptr nonnull %a) call void @_ZN1AC1Ev(ptr nonnull %a) %call = call i32 @_Z1fP1AMS_FivE(ptr nonnull %a, i64 9, i64 0) - call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %a) + call void @llvm.lifetime.end.p0(ptr nonnull %a) ret i32 %call } diff --git a/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll b/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll index 9b293d39..aad192a 100644 --- a/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll +++ b/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll @@ -54,9 +54,9 @@ define void @caller2_below_threshold(ptr %p1, i1 %b) { ; CHECK-NEXT: br i1 [[COND]], label [[EXIT:%.*]], label [[SPLIT:%.*]] ; CHECK: split: ; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave.p0() -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 60000, ptr [[VLA_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VLA_I]]) ; CHECK-NEXT: call void @extern_call(ptr nonnull [[VLA_I]]) #[[ATTR3]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 60000, ptr [[VLA_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VLA_I]]) ; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[SAVEDSTACK]]) ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: diff --git a/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll b/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll index c74351b..02f5774 100644 --- a/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll +++ b/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll @@ -52,7 +52,7 @@ return: ; preds = %check_pointers_are_ define i32 @main() { ; CHECK-LABEL: define i32 @main() { ; CHECK-NEXT: [[G_VAR:%.*]] = alloca [[STRUCT_A:%.*]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[G_VAR]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[G_VAR]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[G_VAR]], ptr align 8 @g_var, i64 20, i1 false) ; CHECK-NEXT: [[VAL_I:%.*]] = load i32, ptr [[G_VAR]], align 8 ; CHECK-NEXT: [[DOTNOT_I:%.*]] = icmp eq i32 [[VAL_I]], 0 @@ -68,7 +68,7 @@ define i32 @main() { ; CHECK-NEXT: call void @abort() ; CHECK-NEXT: unreachable ; CHECK: callee.exit: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[G_VAR]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[G_VAR]]) ; CHECK-NEXT: ret i32 0 ; call void @callee(ptr noundef byval(%struct.a) align 8 @g_var) diff --git a/llvm/test/Transforms/Inline/inline-tail.ll b/llvm/test/Transforms/Inline/inline-tail.ll index 0bfd056..b2bf3bb 100644 --- a/llvm/test/Transforms/Inline/inline-tail.ll +++ b/llvm/test/Transforms/Inline/inline-tail.ll @@ -64,7 +64,7 @@ define void @test_byval_a(ptr byval(i32) %p) { ; CHECK-LABEL: define void @test_byval_a ; CHECK-SAME: (ptr byval(i32) [[P:%.*]]) { ; 
CHECK-NEXT: [[P1:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[P1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P1]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[P1]], ptr [[P]], i64 4, i1 false) ; CHECK-NEXT: musttail call void @test_byval_c(ptr byval(i32) [[P1]]) ; CHECK-NEXT: ret void @@ -87,7 +87,7 @@ define void @test_dynalloca_a(ptr byval(i32) %p, i32 %n) { ; CHECK-SAME: (ptr byval(i32) [[P:%.*]], i32 [[N:%.*]]) { ; CHECK-NEXT: [[P1:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave.p0() -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[P1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P1]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[P1]], ptr [[P]], i64 4, i1 false) ; CHECK-NEXT: [[BUF_I:%.*]] = alloca i8, i32 [[N]], align 1 ; CHECK-NEXT: call void @escape(ptr [[BUF_I]]) diff --git a/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll b/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll index 4e13ff4..4ac4675 100644 --- a/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll +++ b/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll @@ -312,9 +312,9 @@ define void @caller_multiple(i32 %a, i32 %b) #1 { ; CHECK-NEXT: store i32 [[INC]], ptr [[I]], align 4 ; CHECK-NEXT: br label %[[FOR_COND1]] ; CHECK: [[FOR_END4]]: -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_ADDR_I]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[B_ADDR_I]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_ADDR_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_ADDR_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I_I]]) ; CHECK-NEXT: store i32 0, ptr [[A_ADDR_I]], align 4 ; CHECK-NEXT: store i32 5, ptr [[B_ADDR_I]], align 4 ; CHECK-NEXT: br label %[[FOR_COND_I:.*]] @@ -526,9 +526,9 @@ define void @caller_nested(i32 %a, i32 %b) #1 { ; CHECK-NEXT: store i32 [[INC14]], ptr [[I9]], align 4 ; CHECK-NEXT: br label %[[FOR_COND10]] ; CHECK: [[FOR_END15]]: -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_ADDR_I]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[B_ADDR_I]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_ADDR_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_ADDR_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I_I]]) ; CHECK-NEXT: store i32 0, ptr [[A_ADDR_I]], align 4 ; CHECK-NEXT: store i32 5, ptr [[B_ADDR_I]], align 4 ; CHECK-NEXT: br label %[[FOR_COND_I:.*]] diff --git a/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll b/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll index 7438ef3..074550b 100644 --- a/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll +++ b/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll @@ -18,9 +18,9 @@ define void @helper() { define void @test() { ; CHECK-LABEL: define void @test() { ; CHECK-NEXT: [[A_I:%.*]] = alloca i8, align 1 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]]) ; CHECK-NEXT: call void @use(ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]]) ; CHECK-NEXT: ret void ; call void @helper() diff --git 
a/llvm/test/Transforms/Inline/lifetime.ll b/llvm/test/Transforms/Inline/lifetime.ll index 3ef5019..06b911d 100644 --- a/llvm/test/Transforms/Inline/lifetime.ll +++ b/llvm/test/Transforms/Inline/lifetime.ll @@ -2,21 +2,21 @@ ; RUN: opt -passes=inline -S < %s | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" -declare void @llvm.lifetime.start.p0(i64, ptr) -declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.start.p0(ptr) +declare void @llvm.lifetime.end.p0(ptr) define void @helper_both_markers() { ; CHECK-LABEL: define void @helper_both_markers() { ; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[A]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret void ; %a = alloca i8 ; Size in llvm.lifetime.start / llvm.lifetime.end differs from ; allocation size. We should use the former. - call void @llvm.lifetime.start.p0(i64 2, ptr %a) - call void @llvm.lifetime.end.p0(i64 2, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret void } @@ -24,10 +24,10 @@ define void @test_both_markers() { ; CHECK-LABEL: define void @test_both_markers() { ; CHECK-NEXT: [[A_I1:%.*]] = alloca i8, align 1 ; CHECK-NEXT: [[A_I:%.*]] = alloca i8, align 1 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[A_I1]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[A_I1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I1]]) ; CHECK-NEXT: ret void ; call void @helper_both_markers() @@ -54,12 +54,12 @@ define void @test_no_marker() { ; CHECK-LABEL: define void @test_no_marker() { ; CHECK-NEXT: [[A_I1:%.*]] = alloca i8, align 1 ; CHECK-NEXT: [[A_I:%.*]] = alloca i8, align 1 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]]) ; CHECK-NEXT: call void @use(ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A_I1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I1]]) ; CHECK-NEXT: call void @use(ptr [[A_I1]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A_I1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I1]]) ; CHECK-NEXT: ret void ; call void @helper_no_markers() @@ -70,13 +70,13 @@ define void @test_no_marker() { define void @helper_two_casts() { ; CHECK-LABEL: define void @helper_two_casts() { ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret void ; %a = alloca i32 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + 
call void @llvm.lifetime.start.p0(ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret void } @@ -84,10 +84,10 @@ define void @test_two_casts() { ; CHECK-LABEL: define void @test_two_casts() { ; CHECK-NEXT: [[A_I1:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I1]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I1]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I1]]) ; CHECK-NEXT: ret void ; call void @helper_two_casts() @@ -109,9 +109,9 @@ define void @helper_arrays_alloca() { define void @test_arrays_alloca() { ; CHECK-LABEL: define void @test_arrays_alloca() { ; CHECK-NEXT: [[A_I:%.*]] = alloca [10 x i32], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]]) ; CHECK-NEXT: call void @use(ptr [[A_I]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[A_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]]) ; CHECK-NEXT: ret void ; call void @helper_arrays_alloca() diff --git a/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll b/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll index 531801d..2bded9c 100644 --- a/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll +++ b/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll @@ -16,12 +16,12 @@ define i32 @caller_no_gc() { ; CHECK-LABEL: define i32 @caller_no_gc() gc "example" { ; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ROOT_I]]) ; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null) ; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h() ; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8 ; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]]) ; CHECK-NEXT: ret i32 [[LENGTH_I]] ; %x = call i32 @callee_with_gc() @@ -32,12 +32,12 @@ define i32 @caller_no_gc() { define i32 @caller_same_gc() gc "example" { ; CHECK-LABEL: define i32 @caller_same_gc() gc "example" { ; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ROOT_I]]) ; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null) ; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h() ; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8 ; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]]) ; CHECK-NEXT: ret i32 [[LENGTH_I]] ; %x = call i32 @callee_with_gc() @@ -97,12 +97,12 @@ define i32 @callee_with_other_gc() gc "other-example" { define i32 @caller_inline_first_caller() { ; CHECK-LABEL: define i32 @caller_inline_first_caller() gc "example" { ; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]]) +; CHECK-NEXT: call void 
@llvm.lifetime.start.p0(ptr [[ROOT_I]]) ; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null) ; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h() ; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8 ; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]]) ; CHECK-NEXT: [[Y:%.*]] = call i32 @callee_with_other_gc() ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LENGTH_I]], [[Y]] ; CHECK-NEXT: ret i32 [[ADD]] @@ -118,12 +118,12 @@ define i32 @caller_inline_first_caller() { define i32 @caller_inline_second_caller() gc "example" { ; CHECK-LABEL: define i32 @caller_inline_second_caller() gc "example" { ; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ROOT_I]]) ; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null) ; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h() ; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8 ; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]]) ; CHECK-NEXT: [[Y:%.*]] = call i32 @callee_with_other_gc() ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LENGTH_I]], [[Y]] ; CHECK-NEXT: ret i32 [[ADD]] diff --git a/llvm/test/Transforms/Inline/noalias-calls-always.ll b/llvm/test/Transforms/Inline/noalias-calls-always.ll index a80cd12..18a65b9 100644 --- a/llvm/test/Transforms/Inline/noalias-calls-always.ll +++ b/llvm/test/Transforms/Inline/noalias-calls-always.ll @@ -33,13 +33,13 @@ define void @foo(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b) ; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1 ; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]]) ; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A:%.*]], ptr align 16 [[B:%.*]], i64 16, i1 false), !noalias [[META3]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C:%.*]], i64 16, i1 false), !noalias [[META0]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META5:![0-9]+]] ; CHECK-NEXT: call void @hey(), !noalias [[META5]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META0]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]]) ; CHECK-NEXT: ret void ; entry: @@ -74,13 +74,13 @@ define void @foo_cs(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture % ; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1 ; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) ; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A:%.*]], ptr align 16 [[B:%.*]], i64 16, i1 false), !noalias [[META9]] ; 
CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C:%.*]], i64 16, i1 false), !noalias [[META6]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META11:![0-9]+]] ; CHECK-NEXT: call void @hey(), !noalias [[META11]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META6]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]]) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/Transforms/Inline/noalias-calls.ll b/llvm/test/Transforms/Inline/noalias-calls.ll index fdbad60..4673dae 100644 --- a/llvm/test/Transforms/Inline/noalias-calls.ll +++ b/llvm/test/Transforms/Inline/noalias-calls.ll @@ -36,13 +36,13 @@ define void @foo(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b) ; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1 ; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]]) ; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[B]], i64 16, i1 false), !noalias [[META3]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META0]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META5:![0-9]+]] ; CHECK-NEXT: call void @hey(), !noalias [[META5]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META0]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]]) ; CHECK-NEXT: ret void ; entry: @@ -79,13 +79,13 @@ define void @foo_cs(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture % ; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1 ; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) ; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[B]], i64 16, i1 false), !noalias [[META9]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META6]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META11:![0-9]+]] ; CHECK-NEXT: call void @hey(), !noalias [[META11]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META6]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]]) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/Transforms/InstCombine/2025-08-06-shufflevector-bitcast-vector-of-pointers.ll b/llvm/test/Transforms/InstCombine/2025-08-06-shufflevector-bitcast-vector-of-pointers.ll new file mode 100644 index 0000000..e778d92 --- 
/dev/null +++ b/llvm/test/Transforms/InstCombine/2025-08-06-shufflevector-bitcast-vector-of-pointers.ll @@ -0,0 +1,15 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=instcombine -S | FileCheck %s + +; Make sure that we don't crash when optimizing shufflevector of <N x ptr> with <1 x i32> mask followed by bitcast of <1 x ptr> to ptr + +define ptr @test(<3 x ptr> %vptr) { +; CHECK-LABEL: define ptr @test( +; CHECK-SAME: <3 x ptr> [[VPTR:%.*]]) { +; CHECK-NEXT: [[SV_EXTRACT:%.*]] = extractelement <3 x ptr> [[VPTR]], i64 0 +; CHECK-NEXT: ret ptr [[SV_EXTRACT]] +; + %SV = shufflevector <3 x ptr> %vptr, <3 x ptr> zeroinitializer, <1 x i32> zeroinitializer + %BC = bitcast <1 x ptr> %SV to ptr + ret ptr %BC +} diff --git a/llvm/test/Transforms/InstCombine/assume_inevitable.ll b/llvm/test/Transforms/InstCombine/assume_inevitable.ll index 2643c9b..5f27ff1 100644 --- a/llvm/test/Transforms/InstCombine/assume_inevitable.ll +++ b/llvm/test/Transforms/InstCombine/assume_inevitable.ll @@ -35,10 +35,10 @@ entry: %dummy_eq = icmp ugt i32 %loadres, 42 tail call void @llvm.assume(i1 %dummy_eq) - call void @llvm.lifetime.start.p0(i64 1, ptr %dummy) + call void @llvm.lifetime.start.p0(ptr %dummy) %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %dummy) call void @llvm.invariant.end.p0(ptr %i, i64 1, ptr %dummy) - call void @llvm.lifetime.end.p0(i64 1, ptr %dummy) + call void @llvm.lifetime.end.p0(ptr %dummy) %m_a = call ptr @llvm.ptr.annotation.p0(ptr %m, ptr @.str, ptr @.str1, i32 2, ptr null) %objsz = call i64 @llvm.objectsize.i64.p0(ptr %c, i1 false) @@ -61,8 +61,8 @@ declare i64 @llvm.objectsize.i64.p0(ptr, i1) declare i32 @llvm.annotation.i32(i32, ptr, ptr, i32) declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr) -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare ptr @llvm.invariant.start.p0(i64, ptr nocapture) declare void @llvm.invariant.end.p0(ptr, i64, ptr nocapture) diff --git a/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll b/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll index fe8b321..93c4ae6 100644 --- a/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll +++ b/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll @@ -15,16 +15,16 @@ entry: define i32 @objsize2_custom_idx() #0 { entry: %var = alloca %struct.V, align 4 - call void @llvm.lifetime.start.p0(i64 28, ptr %var) #3 + call void @llvm.lifetime.start.p0(ptr %var) #3 %arrayidx = getelementptr inbounds [10 x i8], ptr %var, i64 0, i64 1 %0 = call i64 @llvm.objectsize.i64.p0(ptr %arrayidx, i1 false, i1 false, i1 false) %conv = trunc i64 %0 to i32 - call void @llvm.lifetime.end.p0(i64 28, ptr %var) #3 + call void @llvm.lifetime.end.p0(ptr %var) #3 ret i32 %conv ; CHECK: ret i32 27 } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 declare ptr @malloc(i64) declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1) diff --git a/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll b/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll index b8919a7..051466f 100644 --- a/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll +++ 
b/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll @@ -25,24 +25,24 @@ define i32 @foo1(i32 %N) { entry: %Big = alloca [20 x i8], align 16 %Small = alloca [10 x i8], align 1 - call void @llvm.lifetime.start.p0(i64 20, ptr %Big) - call void @llvm.lifetime.start.p0(i64 10, ptr %Small) + call void @llvm.lifetime.start.p0(ptr %Big) + call void @llvm.lifetime.start.p0(ptr %Small) %tobool = icmp ne i32 %N, 0 %add.ptr = getelementptr inbounds [20 x i8], ptr %Big, i64 0, i64 10 %cond = select i1 %tobool, ptr %add.ptr, ptr %Small %0 = call i64 @llvm.objectsize.i64.p0(ptr %cond, i1 false) %conv = trunc i64 %0 to i32 - call void @llvm.lifetime.end.p0(i64 10, ptr %Small) - call void @llvm.lifetime.end.p0(i64 20, ptr %Big) + call void @llvm.lifetime.end.p0(ptr %Small) + call void @llvm.lifetime.end.p0(ptr %Big) ret i32 %conv ; CHECK: ret i32 10 } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare i64 @llvm.objectsize.i64.p0(ptr, i1) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define void @foo() { entry: diff --git a/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll b/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll index 533451f..3a7b760 100644 --- a/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll +++ b/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll @@ -18,11 +18,11 @@ define i32 @foo() #0 { ; CHECK-NEXT: ret i32 27 ; %var = alloca %struct.V, align 4 - call void @llvm.lifetime.start.p0(i64 28, ptr %var) #3 + call void @llvm.lifetime.start.p0(ptr %var) #3 %arrayidx = getelementptr inbounds [10 x i8], ptr %var, i64 0, i64 1 %t1 = call i64 @llvm.objectsize.i64.p0(ptr %arrayidx, i1 false) %conv = trunc i64 %t1 to i32 - call void @llvm.lifetime.end.p0(i64 28, ptr %var) #3 + call void @llvm.lifetime.end.p0(ptr %var) #3 ret i32 %conv } @@ -63,9 +63,9 @@ define ptr @minimal_invariant_start_use(i8 %x) { ret ptr %i } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare i64 @llvm.objectsize.i64.p0(ptr, i1) #2 -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #0 declare ptr @llvm.invariant.start.p0(i64 immarg, ptr nocapture) #0 declare void @llvm.invariant.end.p0(ptr, i64 immarg, ptr nocapture) #0 diff --git a/llvm/test/Transforms/InstCombine/compare-alloca.ll b/llvm/test/Transforms/InstCombine/compare-alloca.ll index a27cd70..55d92b7 100644 --- a/llvm/test/Transforms/InstCombine/compare-alloca.ll +++ b/llvm/test/Transforms/InstCombine/compare-alloca.ll @@ -86,18 +86,18 @@ define i1 @alloca_argument_compare_escaped_through_store(ptr %arg, ptr %ptr) { ret i1 %cmp } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define i1 @alloca_argument_compare_benign_instrs(ptr %arg) { ; CHECK-LABEL: @alloca_argument_compare_benign_instrs( ; CHECK-NEXT: ret i1 false ; %alloc = alloca i8 - call void @llvm.lifetime.start.p0(i64 1, ptr %alloc) + call void @llvm.lifetime.start.p0(ptr %alloc) %cmp = icmp eq ptr %arg, %alloc %x = load i8, ptr %arg store i8 %x, ptr %alloc - call void @llvm.lifetime.end.p0(i64 1, ptr %alloc) + call void @llvm.lifetime.end.p0(ptr %alloc) ret 
diff --git a/llvm/test/Transforms/InstCombine/deadcode.ll b/llvm/test/Transforms/InstCombine/deadcode.ll
index f3e1ba6..4dcdbb9 100644
--- a/llvm/test/Transforms/InstCombine/deadcode.ll
+++ b/llvm/test/Transforms/InstCombine/deadcode.ll
@@ -22,13 +22,13 @@ define ptr @test2(i32 %width) {
 declare ptr @llvm.stacksave()
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
 define void @test3() {
 %a = alloca i32
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
 ret void
 }
diff --git a/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll b/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
index 422e179..597bd2c 100644
--- a/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
+++ b/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
@@ -2,8 +2,8 @@
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare void @foo(ptr nocapture, ptr nocapture)
 define void @bar(i1 %flag) #0 !dbg !4 {
@@ -20,11 +20,11 @@ define void @bar(i1 %flag) #0 !dbg !4 {
 ; CHECK-NEXT: #dbg_declare(ptr [[TEXT]], [[META16:![0-9]+]], !DIExpression(), [[META24:![0-9]+]])
 ; CHECK-NEXT: br label [[FIN:%.*]]
 ; CHECK: else:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUFF]])
 ; CHECK-NEXT: call void @foo(ptr nonnull [[BUFF]], ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[BUFF]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEXT]])
 ; CHECK-NEXT: br label [[FIN]]
 ; CHECK: fin:
 ; CHECK-NEXT: ret void
@@ -35,31 +35,31 @@ entry:
 br i1 %flag, label %if, label %else
 if:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
 br label %bb2
 bb2:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
 br label %bb3
 bb3:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
 call void @llvm.dbg.declare(metadata ptr %text, metadata !14, metadata !25), !dbg !26
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
 br label %fin
 else:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
 call void @foo(ptr %buff, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
 br label %fin
 fin:
diff --git a/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll b/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll
index e379b32..fd45fe2 100644
--- a/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll
+++ b/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll
@@ -1,7 +1,7 @@
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare void @foo(ptr nocapture)
 define void @asan() sanitize_address {
@@ -9,8 +9,8 @@ entry:
 ; CHECK-LABEL: @asan(
 %text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
 ; CHECK: call void @llvm.lifetime.start
 ; CHECK-NEXT: call void @llvm.lifetime.end
@@ -24,8 +24,8 @@ entry:
 ; CHECK-LABEL: @hwasan(
 %text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
 ; CHECK: call void @llvm.lifetime.start
 ; CHECK-NEXT: call void @llvm.lifetime.end
@@ -39,8 +39,8 @@ entry:
 ; CHECK-LABEL: @msan(
 %text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
 ; CHECK: call void @llvm.lifetime.start
 ; CHECK-NEXT: call void @llvm.lifetime.end
@@ -54,8 +54,8 @@ entry:
 ; CHECK-LABEL: @no_asan(
 %text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
 ; CHECK-NO: call void @llvm.lifetime
 call void @foo(ptr %text) ; Keep alloca alive
diff --git a/llvm/test/Transforms/InstCombine/lifetime.ll b/llvm/test/Transforms/InstCombine/lifetime.ll
index b94c9694..6313dba 100644
--- a/llvm/test/Transforms/InstCombine/lifetime.ll
+++ b/llvm/test/Transforms/InstCombine/lifetime.ll
@@ -2,8 +2,8 @@
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare void @foo(ptr nocapture, ptr nocapture)
 define void @bar(i1 %flag) !dbg !4 {
@@ -20,11 +20,11 @@ define void @bar(i1 %flag) !dbg !4 {
 ; CHECK-NEXT: #dbg_declare(ptr [[TEXT]], [[META16:![0-9]+]], !DIExpression(), [[META24:![0-9]+]])
 ; CHECK-NEXT: br label [[FIN:%.*]]
 ; CHECK: else:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUFF]])
 ; CHECK-NEXT: call void @foo(ptr nonnull [[BUFF]], ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[BUFF]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEXT]])
 ; CHECK-NEXT: br label [[FIN]]
 ; CHECK: fin:
 ; CHECK-NEXT: ret void
@@ -35,31 +35,31 @@ entry:
 br i1 %flag, label %if, label %else
 if:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
 br label %bb2
 bb2:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
 br label %bb3
 bb3:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
 call void @llvm.dbg.declare(metadata ptr %text, metadata !14, metadata !25), !dbg !26
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
 br label %fin
 else:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
 call void @foo(ptr %buff, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
 br label %fin
 fin:
diff --git a/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll b/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll
index 0072153..7aa5eed 100644
--- a/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll
+++ b/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll
@@ -26,7 +26,7 @@ entry:
 %retval = alloca i32, align 4
 %d1 = alloca i32, align 4
 store i32 0, ptr %retval, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %d1) #4, !dbg !17
+ call void @llvm.lifetime.start.p0(ptr %d1) #4, !dbg !17
 ; CHECK: #dbg_value(i32 42, [[METADATA_IDX1:![0-9]+]], !DIExpression(),
 ; CHECK-NEXT: store
 call void @llvm.dbg.declare(metadata ptr %d1, metadata !16, metadata !DIExpression()), !dbg !17
@@ -48,11 +48,11 @@ while.body: ; preds = %while.cond
 br label %while.cond, !dbg !22, !llvm.loop !24
 while.end: ; preds = %while.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %d1) #4, !dbg !25
+ call void @llvm.lifetime.end.p0(ptr %d1) #4, !dbg !25
 ret i32 0, !dbg !26
 }
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
 declare void @llvm.dbg.declare(metadata, metadata, metadata)
@@ -64,7 +64,7 @@ define internal void @_ZL6escapeRi(ptr dereferenceable(4) %c) #3 !dbg !34 {
 ret void
 }
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 !llvm.dbg.cu = !{!2}
 !llvm.module.flags = !{!8, !9, !10}
diff --git a/llvm/test/Transforms/InstCombine/malloc-free.ll b/llvm/test/Transforms/InstCombine/malloc-free.ll
index d8a1c07..5cff5d6 100644
--- a/llvm/test/Transforms/InstCombine/malloc-free.ll
+++ b/llvm/test/Transforms/InstCombine/malloc-free.ll
@@ -97,8 +97,8 @@ define i1 @foo() {
 ret i1 %z
 }
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
 declare i64 @llvm.objectsize.i64(ptr, i1)
 declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
 declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
diff --git a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
index 9c9ba83..64091a9 100644
--- a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -178,14 +178,14 @@ define void @test4() {
 ret void
 }
-declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
 define void @test5() {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT: call void @baz(ptr nonnull byval(i8) @G)
 ; CHECK-NEXT: ret void
 ;
 %A = alloca %T
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %A, ptr align 4 @G, i64 124, i1 false)
 call void @baz(ptr byval(i8) %A)
 ret void
@@ -308,7 +308,7 @@ define float @test11(i64 %i) {
 entry:
 %a = alloca [4 x float], align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 call void @llvm.memcpy.p0.p1.i64(ptr align 4 %a, ptr addrspace(1) align 4 @I, i64 16, i1 false)
 %g = getelementptr inbounds [4 x float], ptr %a, i64 0, i64 %i
 %r = load float, ptr %g, align 4
@@ -320,7 +320,7 @@ define float @test11_volatile(i64 %i) {
 ; CHECK-LABEL: @test11_volatile(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[A:%.*]] = alloca [4 x float], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p1.i64(ptr align 4 [[A]], ptr addrspace(1) align 4 @I, i64 16, i1 true)
 ; CHECK-NEXT: [[G:%.*]] = getelementptr inbounds [4 x float], ptr [[A]], i64 0, i64 [[I:%.*]]
 ; CHECK-NEXT: [[R:%.*]] = load float, ptr [[G]], align 4
@@ -329,7 +329,7 @@ define float @test11_volatile(i64 %i) {
 entry:
 %a = alloca [4 x float], align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 call void @llvm.memcpy.p0.p1.i64(ptr align 4 %a, ptr addrspace(1) align 4 @I, i64 16, i1 true)
 %g = getelementptr inbounds [4 x float], ptr %a, i64 0, i64 %i
 %r = load float, ptr %g, align 4
diff --git a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
index 86e586e..a4e247e 100644
--- a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
+++ b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
@@ -236,12 +236,11 @@ define float @simple_recurrence_intrinsic_maximumnum(i32 %n, float %a, float %b)
 ; CHECK-NEXT: br label %[[LOOP:.*]]
 ; CHECK: [[LOOP]]:
 ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX]] = call nnan float @llvm.maximumnum.f32(float [[FMAX_ACC]], float [[B]])
 ; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
 ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
 ; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
 ; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMAX:%.*]] = call nnan float @llvm.maximumnum.f32(float [[A]], float [[B]])
 ; CHECK-NEXT: ret float [[FMAX]]
 ;
 entry:
@@ -265,12 +264,11 @@ define float @simple_recurrence_intrinsic_minimumnum(i32 %n, float %a, float %b)
 ; CHECK-NEXT: br label %[[LOOP:.*]]
 ; CHECK: [[LOOP]]:
 ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN]] = call nnan float @llvm.minimumnum.f32(float [[FMIN_ACC]], float [[B]])
 ; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
 ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
 ; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
 ; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMIN:%.*]] = call nnan float @llvm.minimumnum.f32(float [[A]], float [[B]])
 ; CHECK-NEXT: ret float [[FMIN]]
 ;
 entry:
@@ -296,7 +294,7 @@ define i8 @simple_recurrence_intrinsic_multiuse_phi(i8 %n, i8 %a, i8 %b) {
 ; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
 ; CHECK-NEXT: [[UMAX_ACC:%.*]] = phi i8 [ [[UMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
 ; CHECK-NEXT: call void @use(i8 [[UMAX_ACC]])
-; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[UMAX_ACC]], i8 [[B]])
+; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[A]], i8 [[B]])
 ; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
 ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
 ; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
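The recurrence-binary-intrinsic.ll hunks above track an InstCombine improvement for idempotent min/max recurrences: when a phi starts at %a and every iteration computes max(acc, %b), the value stabilizes to max(%a, %b) after the first iteration, so the loop-carried phi can be dropped and the intrinsic evaluated once. A sketch of the pattern with hypothetical names:

  loop:
    %acc = phi float [ %a, %entry ], [ %max, %loop ]              ; loop-carried accumulator
    %max = call float @llvm.maximumnum.f32(float %acc, float %b)  ; idempotent in %b
    ...
  exit:
    ret float %max    ; folds to a single maximumnum(%a, %b) computed in the exit block

When the phi has another user (the multiuse_phi test), only the call operand is simplified from the accumulator to %a.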
diff --git a/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll b/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
index ccb9601..bd43daa 100644
--- a/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
+++ b/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
@@ -49,12 +49,12 @@ define i32 @test() {
 ; CHECK-NEXT: bb:
 ; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
 ; CHECK-NEXT: [[VAR1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
 ; CHECK-NEXT: [[VAR3:%.*]] = call i32 @foo(ptr nonnull writeonly [[VAR]])
 ; CHECK-NEXT: [[VAR4:%.*]] = icmp eq i32 [[VAR3]], 0
 ; CHECK-NEXT: br i1 [[VAR4]], label [[BB5:%.*]], label [[BB14:%.*]]
 ; CHECK: bb5:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR1]])
 ; CHECK-NEXT: [[VAR8:%.*]] = load i32, ptr [[VAR]], align 4
 ; CHECK-NEXT: [[VAR9:%.*]] = icmp eq i32 [[VAR8]], 0
 ; CHECK-NEXT: [[VAR7:%.*]] = call i32 @foo(ptr nonnull writeonly [[VAR1]])
@@ -66,23 +66,23 @@ define i32 @test() {
 ; CHECK-NEXT: br label [[BB12]]
 ; CHECK: bb12:
 ; CHECK-NEXT: [[VAR13:%.*]] = phi i32 [ [[VAR11]], [[BB10]] ], [ [[VAR7]], [[BB_CRIT_EDGE]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR1]])
 ; CHECK-NEXT: br label [[BB14]]
 ; CHECK: bb14:
 ; CHECK-NEXT: [[VAR15:%.*]] = phi i32 [ [[VAR13]], [[BB12]] ], [ 0, [[BB:%.*]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
 ; CHECK-NEXT: ret i32 [[VAR15]]
 ;
 bb:
 %var = alloca i32, align 4
 %var1 = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %var) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %var) #4
 %var3 = call i32 @foo(ptr nonnull writeonly %var)
 %var4 = icmp eq i32 %var3, 0
 br i1 %var4, label %bb5, label %bb14
 bb5: ; preds = %bb
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %var1) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %var1) #4
 %var8 = load i32, ptr %var, align 4
 %var9 = icmp eq i32 %var8, 0
 %var7 = call i32 @foo(ptr nonnull writeonly %var1)
@@ -97,12 +97,12 @@ bb_crit_edge:
 bb12: ; preds = %bb10, %bb5
 %var13 = phi i32 [ %var11, %bb10 ], [ %var7, %bb_crit_edge ]
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %var1) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %var1) #4
 br label %bb14
 bb14: ; preds = %bb12, %bb
 %var15 = phi i32 [ %var13, %bb12 ], [ 0, %bb ]
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %var)
+ call void @llvm.lifetime.end.p0(ptr nonnull %var)
 ret i32 %var15
 }
@@ -325,18 +325,18 @@ define i32 @sink_lifetime1(i1 %c) {
 ; CHECK-LABEL: @sink_lifetime1(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
 ; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
 ; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
 ; CHECK: early_return:
 ; CHECK-NEXT: ret i32 0
 ; CHECK: use_block:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
 ; CHECK-NEXT: ret i32 [[VAR3]]
 ;
 entry:
 %var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
 %var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
 br i1 %c, label %early_return, label %use_block
@@ -344,7 +344,7 @@ early_return:
 ret i32 0
 use_block:
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
 ret i32 %var3
 }
@@ -352,25 +352,25 @@ define i32 @sink_lifetime2(i1 %c) {
 ; CHECK-LABEL: @sink_lifetime2(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
 ; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
 ; CHECK-NEXT: br i1 [[C:%.*]], label [[MERGE:%.*]], label [[USE_BLOCK:%.*]]
 ; CHECK: merge:
 ; CHECK-NEXT: [[RET:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAR3]], [[USE_BLOCK]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
 ; CHECK-NEXT: ret i32 [[RET]]
 ; CHECK: use_block:
 ; CHECK-NEXT: br label [[MERGE]]
 ;
 entry:
 %var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
 %var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
 br i1 %c, label %merge, label %use_block
 merge:
 %ret = phi i32 [0, %entry], [%var3, %use_block]
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
 ret i32 %ret
 use_block:
@@ -390,8 +390,8 @@ define i32 @sink_lifetime3(i1 %c) {
 ;
 entry:
 %var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
 ; If unknown accesses %var, that's UB
 %var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
 br i1 %c, label %early_return, label %use_block
@@ -407,9 +407,9 @@ define i32 @sink_lifetime4a(i1 %c) {
 ; CHECK-LABEL: @sink_lifetime4a(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
 ; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
 ; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
 ; CHECK: early_return:
 ; CHECK-NEXT: ret i32 0
@@ -418,9 +418,9 @@ define i32 @sink_lifetime4a(i1 %c) {
 ;
 entry:
 %var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
 %var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
 br i1 %c, label %early_return, label %use_block
 early_return:
@@ -436,9 +436,9 @@ define i32 @sink_lifetime4b(i1 %c) {
 ; CHECK-LABEL: @sink_lifetime4b(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
 ; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull writeonly [[VAR]]) #[[ATTR1]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
 ; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
 ; CHECK: early_return:
 ; CHECK-NEXT: ret i32 0
@@ -447,9 +447,9 @@ define i32 @sink_lifetime4b(i1 %c) {
 ;
 entry:
 %var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
 %var3 = call i32 @unknown(ptr writeonly %var) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
 br i1 %c, label %early_return, label %use_block
 early_return:
@@ -486,6 +486,6 @@ use_block:
 declare i32 @bar()
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
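trivial-dse-calls.ll, next, applies dead-store reasoning to whole calls: a willreturn, argmemonly call that only writes a non-escaping alloca whose value is never read can be deleted like a dead store, together with the lifetime markers and the alloca. A sketch using the new unsized markers (@f stands in for the test's writeonly callee):

  %a = alloca i32, align 4
  call void @llvm.lifetime.start.p0(ptr %a)
  call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
  call void @llvm.lifetime.end.p0(ptr %a)
  ret void        ; nothing reads %a, so InstCombine reduces the body to just this ret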
diff --git a/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll b/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
index 128edff..758071a 100644
--- a/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
+++ b/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare void @unknown()
 declare void @f(ptr)
@@ -25,9 +25,9 @@ define void @test_lifetime() {
 ; CHECK-NEXT: ret void
 ;
 %a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
 ret void
 }
@@ -40,11 +40,11 @@ define void @test_lifetime2() {
 ; CHECK-NEXT: ret void
 ;
 %a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 call void @unknown()
 call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
 call void @unknown()
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
 ret void
 }
diff --git a/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll b/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll
index ab744c62..9c64bfb 100644
--- a/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll
+++ b/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll
@@ -12,7 +12,7 @@ define void @pr150338(ptr %arg) {
 %a = alloca i32
 store ptr %a, ptr %arg
 store i1 true, ptr poison
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
 ret void
 }
@@ -33,7 +33,7 @@ entry:
 bb1:
 %phi1 = phi ptr [ null, %entry ], [ %phi2, %bb2 ]
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 br label %bb2
 bb2:
@@ -45,7 +45,7 @@ define void @lifetime_poison() {
 ; CHECK-LABEL: define void @lifetime_poison() {
 ; CHECK-NEXT: ret void
 ;
- call void @llvm.lifetime.start.p0(i64 4, ptr poison)
- call void @llvm.lifetime.end.p0(i64 4, ptr poison)
+ call void @llvm.lifetime.start.p0(ptr poison)
+ call void @llvm.lifetime.end.p0(ptr poison)
 ret void
 }
diff --git a/llvm/test/Transforms/InstCombine/vararg.ll b/llvm/test/Transforms/InstCombine/vararg.ll
index eb24256..93d230d 100644
--- a/llvm/test/Transforms/InstCombine/vararg.ll
+++ b/llvm/test/Transforms/InstCombine/vararg.ll
@@ -12,14 +12,14 @@ define void @func(ptr nocapture readnone %fmt, ...) {
 entry:
 %va0 = alloca %struct.__va_list, align 8
 %va1 = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %va0)
+ call void @llvm.lifetime.start.p0(ptr %va0)
 call void @llvm.va_start(ptr %va0)
- call void @llvm.lifetime.start.p0(i64 32, ptr %va1)
+ call void @llvm.lifetime.start.p0(ptr %va1)
 call void @llvm.va_copy(ptr %va1, ptr %va0)
 call void @llvm.va_end(ptr %va1)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va1)
+ call void @llvm.lifetime.end.p0(ptr %va1)
 call void @llvm.va_end(ptr %va0)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va0)
+ call void @llvm.lifetime.end.p0(ptr %va0)
 ret void
 }
@@ -31,28 +31,28 @@ define void @func_destroy_copy_src(ptr nocapture readnone %fmt, ...) {
 ; CHECK-NEXT: [[ENTRY:.*:]]
 ; CHECK-NEXT: [[VA0:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
 ; CHECK-NEXT: [[VA1:%.*]] = alloca [[STRUCT___VA_LIST]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VA0]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VA1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VA0]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VA1]])
 ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VA0]])
 ; CHECK-NEXT: call void @llvm.va_copy.p0(ptr nonnull [[VA1]], ptr nonnull [[VA0]])
 ; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA0]])
 ; CHECK-NEXT: call void @callee(ptr nonnull [[VA1]])
 ; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VA1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VA0]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VA1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VA0]])
 ; CHECK-NEXT: ret void
 ;
 entry:
 %va0 = alloca %struct.__va_list, align 8
 %va1 = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %va0)
- call void @llvm.lifetime.start.p0(i64 32, ptr %va1)
+ call void @llvm.lifetime.start.p0(ptr %va0)
+ call void @llvm.lifetime.start.p0(ptr %va1)
 call void @llvm.va_start(ptr %va0)
 call void @llvm.va_copy(ptr %va1, ptr %va0)
 call void @llvm.va_end(ptr %va0)
 call void @callee(ptr %va1)
 call void @llvm.va_end(ptr %va1)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va1)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va0)
+ call void @llvm.lifetime.end.p0(ptr %va1)
+ call void @llvm.lifetime.end.p0(ptr %va0)
 ret void
 }
diff --git a/llvm/test/Transforms/LICM/dropped-tbaa.ll b/llvm/test/Transforms/LICM/dropped-tbaa.ll
index 11083b4..92839f1 100644
--- a/llvm/test/Transforms/LICM/dropped-tbaa.ll
+++ b/llvm/test/Transforms/LICM/dropped-tbaa.ll
@@ -24,7 +24,7 @@ define void @foo(ptr %data, ptr %addend) #0 {
 ; CHECK-NEXT: [[CONV_I:%.*]] = sitofp i32 [[TMP2]] to double
 entry:
 %i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
 store i32 0, ptr %i, align 4, !tbaa !1
 br i1 true, label %for.body.lr.ph, label %for.cond.cleanup
@@ -35,7 +35,7 @@ for.cond.for.cond.cleanup_crit_edge: ; preds = %for.inc
 br label %for.cond.cleanup
 for.cond.cleanup: ; preds = %for.cond.for.cond.cleanup_crit_edge, %entry
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
 br label %for.end
 for.body: ; preds = %for.body.lr.ph, %for.inc
@@ -67,8 +67,8 @@ for.end: ; preds = %for.cond.cleanup
 ret void
 }
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
 attributes #0 = { argmemonly nounwind }
diff --git a/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll b/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll
index 61f0eb19..0aa56d2 100644
--- a/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll
+++ b/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll
@@ -17,7 +17,7 @@ declare i16 @e(i32)
 define i16 @g() !dbg !13 {
 entry:
 %l_284 = alloca [2 x [3 x [6 x i32]]], align 16
- call void @llvm.lifetime.start.p0(i64 144, ptr nonnull %l_284), !dbg !24
+ call void @llvm.lifetime.start.p0(ptr nonnull %l_284), !dbg !24
 call void @llvm.dbg.declare(metadata ptr %l_284, metadata !17, metadata !DIExpression()), !dbg !25
 %0 = load i16, ptr @a, align 2, !dbg !26, !tbaa !29
 %cmp11 = icmp sgt i16 %0, -1, !dbg !33
@@ -51,15 +51,15 @@ for.body.cleanup_crit_edge: ; preds = %for.body
 br label %cleanup, !dbg !38
 cleanup: ; preds = %for.body.cleanup_crit_edge, %for.cond.cleanup_crit_edge, %entry
- call void @llvm.lifetime.end.p0(i64 144, ptr nonnull %l_284), !dbg !51
+ call void @llvm.lifetime.end.p0(ptr nonnull %l_284), !dbg !51
 ret i16 1, !dbg !51
 }
 ; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 ; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 ; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
 declare void @llvm.dbg.value(metadata, metadata, metadata) #0
diff --git a/llvm/test/Transforms/LICM/loopsink-pr38462.ll b/llvm/test/Transforms/LICM/loopsink-pr38462.ll
index 51eee1f..8b2ff10 100644
--- a/llvm/test/Transforms/LICM/loopsink-pr38462.ll
+++ b/llvm/test/Transforms/LICM/loopsink-pr38462.ll
@@ -37,7 +37,7 @@ __except:
 catchret from %1 to label %__except3
 __except3:
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %s)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s)
 %call.i = call zeroext i1 @g(ptr nonnull %s)
 br i1 %call.i, label %if.then.i, label %exit
@@ -46,7 +46,7 @@ if.then.i:
 br label %exit
 exit:
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %s)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s)
 br label %__try.cont
 __try.cont:
@@ -58,8 +58,8 @@ __try.cont:
 declare i32 @__C_specific_handler(...)
 declare i32 @f()
 declare zeroext i1 @g(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 !1 = !{!"function_entry_count", i64 1}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll
index c7a0de22..970643a 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll
@@ -6,7 +6,7 @@ define void @test(ptr %p, i64 %idx) {
 ; CHECK-SAME: ptr [[P:%.*]], i64 [[IDX:%.*]]) {
 ; CHECK-NEXT: [[ENTRY:.*]]:
 ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [4 x [4 x i32]], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]])
 ; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[IDX]], 6
 ; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 48
 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP1]]
@@ -31,12 +31,12 @@ define void @test(ptr %p, i64 %idx) {
 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[LSR_IV_NEXT]], 0
 ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
 ; CHECK: [[EXIT]]:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOCA]])
 ; CHECK-NEXT: ret void
 ;
 entry:
 %alloca = alloca [4 x [4 x i32]], align 16
- call void @llvm.lifetime.start.p0(i64 64, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
 br label %loop
 loop:
@@ -54,6 +54,6 @@ loop:
 br i1 %exitcond.not, label %exit, label %loop
 exit:
- call void @llvm.lifetime.end.p0(i64 64, ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
 ret void
 }
diff --git a/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll b/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll
index 3ec4fea..f8c5e82 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll
@@ -5,10 +5,10 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 ; Function Attrs: nounwind readnone uwtable
 define dso_local i32 @foo(i32 %arg, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6) local_unnamed_addr #3 {
@@ -83,31 +83,31 @@ bb:
 %tmp16 = alloca [100 x [100 x i32]], align 16
 %tmp17 = alloca [100 x [100 x i32]], align 16
 %tmp18 = alloca [100 x [100 x i32]], align 16
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp7) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp7) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp7, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp8) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp8) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp8, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp9) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp9) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp9, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp10) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp10) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp10, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp11) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp11) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp11, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp12) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp12) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp12, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp13) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp13) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp13, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp14) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp14) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp14, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp15) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp15) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp15, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp16) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp16) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp16, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp17) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp17) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp17, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp18) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp18) #4
 call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp18, i8 0, i64 40000, i1 false)
 %tmp32 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 3
 br label %bb33
@@ -1300,19 +1300,19 @@ bb1051: ; preds = %bb1007
 %tmp1063 = sub i32 %tmp1062, %tmp960
 %tmp1064 = add i32 %tmp1063, %tmp1004
 %tmp1065 = sub i32 %tmp1064, %tmp1048
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp18) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp17) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp16) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp15) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp14) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp13) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp12) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp11) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp10) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp9) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp8) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp7) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp18) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp17) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp16) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp15) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp14) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp13) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp12) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp11) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp10) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp9) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp8) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp7) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #4
 ret i32 %tmp1065
 }
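The LoopVectorize diffs from here on are cleanups in the generated vector preheader: the per-iteration step (vscale times VF) is now computed once and reused for the index increment instead of being re-emitted as a second identical vscale/mul pair, and the N_RND_UP/N_MOD_VF/N_VEC round-up arithmetic disappears when the tail is folded by an active-lane mask, which needs no rounded trip count. A sketch of the reused computation (hypothetical value names):

  vector.ph:
    %vscale = call i64 @llvm.vscale.i64()
    %step = mul nuw i64 %vscale, 8          ; computed once in the preheader
    ...
  vector.body:
    %index.next = add i64 %index, %step     ; reuses %step; no second @llvm.vscale.i64() call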
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
index a8d9a0c..aa22252b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
@@ -9,19 +9,13 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1
 ; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 8, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 8)
 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[VAL]], i64 0
 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
 ; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 8 x i64> [[TMP8]], splat (i64 1)
 ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP7]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP6]]
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP1]]
 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP12]], i64 0
 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -34,7 +28,7 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1
 ; CHECK-NEXT: [[TMP11:%.*]] = lshr <vscale x 8 x i64> [[BROADCAST_SPLAT]], [[TMP10]]
 ; CHECK-NEXT: [[TMP14:%.*]] = trunc <vscale x 8 x i64> [[TMP11]] to <vscale x 8 x i8>
 ; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP14]], ptr [[NEXT_GEP]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_NEXT]], i64 8)
 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
 ; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -92,19 +86,13 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range
 ; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[WIDE_TRIP_COUNT]], [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[WIDE_TRIP_COUNT]])
 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[VAL]], i64 0
 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
 ; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 8 x i64> [[TMP8]], splat (i64 1)
 ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP7]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP6]]
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP1]]
 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP12]], i64 0
 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -117,7 +105,7 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range
 ; CHECK-NEXT: [[TMP11:%.*]] = lshr <vscale x 8 x i64> [[BROADCAST_SPLAT]], [[TMP10]]
 ; CHECK-NEXT: [[TMP14:%.*]] = trunc <vscale x 8 x i64> [[TMP11]] to <vscale x 8 x i8>
 ; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP14]], ptr [[NEXT_GEP]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_NEXT]], i64 [[WIDE_TRIP_COUNT]])
 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
 ; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index 4b895ae..6d16339 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -702,12 +702,6 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 {
 ; PRED: [[VECTOR_PH]]:
 ; PRED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; PRED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 257, [[TMP2]]
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; PRED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
 ; PRED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
 ; PRED-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 2
 ; PRED-NEXT: [[TMP8:%.*]] = sub i64 257, [[TMP7]]
@@ -726,7 +720,7 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 {
 ; PRED-NEXT: [[TMP13:%.*]] = or <vscale x 2 x i16> [[BROADCAST_SPLAT]], splat (i16 1)
 ; PRED-NEXT: [[TMP14:%.*]] = uitofp <vscale x 2 x i16> [[TMP13]] to <vscale x 2 x double>
 ; PRED-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP14]], ptr [[NEXT_GEP]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]]
+; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
 ; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP10]])
 ; PRED-NEXT: [[TMP16:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
 ; PRED-NEXT: [[TMP17:%.*]] = extractelement <vscale x 2 x i1> [[TMP16]], i32 0
@@ -1242,9 +1236,6 @@ define void @test_conditional_interleave_group (ptr noalias %src.1, ptr noalias
 ; PRED-NEXT: [[TMP14:%.*]] = or i1 [[TMP13]], [[TMP12]]
 ; PRED-NEXT: br i1 [[TMP14]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
 ; PRED: [[VECTOR_PH]]:
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 7
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
 ; PRED-NEXT: [[TMP15:%.*]] = sub i64 [[TMP0]], 8
 ; PRED-NEXT: [[TMP16:%.*]] = icmp ugt i64 [[TMP0]], 8
 ; PRED-NEXT: [[TMP17:%.*]] = select i1 [[TMP16]], i64 [[TMP15]], i64 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
index 1ad1e426..6ff6bb4 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
@@ -24,8 +24,6 @@ define void @sdiv_feeding_gep(ptr %dst, i32 %x, i64 %M, i64 %conv6, i64 %N) {
 ; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP9]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
 ; CHECK-NEXT: [[TMP18:%.*]] = sdiv i64 [[M]], [[CONV6]]
 ; CHECK-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP18]] to i32
 ; CHECK-NEXT: [[TMP22:%.*]] = mul i64 [[TMP18]], [[CONV61]]
@@ -43,7 +41,7 @@ define void @sdiv_feeding_gep(ptr %dst, i32 %x, i64 %M, i64 %conv6, i64 %N) {
 ; CHECK-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP34]], i64 [[TMP38]]
 ; CHECK-NEXT: store <vscale x 2 x double> zeroinitializer, ptr [[TMP34]], align 8
 ; CHECK-NEXT: store <vscale x 2 x double> zeroinitializer, ptr [[TMP39]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
 ; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP40]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
@@ -110,12 +108,6 @@ define void @sdiv_feeding_gep_predicated(ptr %dst, i32 %x, i64 %M, i64 %conv6, i
 ; CHECK: [[VECTOR_PH]]:
 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
-; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP7]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
 ; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
 ; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[N]], [[TMP11]]
@@ -127,7 +119,7 @@ define void @sdiv_feeding_gep_predicated(ptr %dst, i32 %x, i64 %M, i64 %conv6, i
 ; CHECK-NEXT: [[TMP15:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; CHECK-NEXT: [[TMP17:%.*]] = mul <vscale x 2 x i64> [[TMP15]], splat (i64 1)
 ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP17]]
-; CHECK-NEXT: [[TMP20:%.*]] = mul i64 1, [[TMP9]]
+; CHECK-NEXT: [[TMP20:%.*]] = mul i64 1, [[TMP6]]
 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP20]], i64 0
 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -149,7 +141,7 @@ define void @sdiv_feeding_gep_predicated(ptr %dst, i32 %x, i64 %M, i64 %conv6, i
 ; CHECK-NEXT: [[TMP33:%.*]] = sext i32 [[TMP32]] to i64
 ; CHECK-NEXT: [[TMP34:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP33]]
 ; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> zeroinitializer, ptr [[TMP34]], i32 8, <vscale x 2 x i1> [[TMP23]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP14]])
 ; CHECK-NEXT: [[TMP36:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
@@ -232,12 +224,6 @@ define void @udiv_urem_feeding_gep(i64 %x, ptr %dst, i64 %N) {
 ; CHECK: [[VECTOR_PH]]:
 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
-; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP7]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
 ; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
 ; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP0]], [[TMP11]]
@@ -249,7 +235,7 @@ define void @udiv_urem_feeding_gep(i64 %x, ptr %dst, i64 %N) {
 ; CHECK-NEXT: [[TMP15:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; CHECK-NEXT: [[TMP17:%.*]] = mul <vscale x 2 x i64> [[TMP15]], splat (i64 1)
 ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP17]]
-; CHECK-NEXT: [[TMP20:%.*]] = mul i64 1, [[TMP9]]
+; CHECK-NEXT: [[TMP20:%.*]] = mul i64 1, [[TMP6]]
 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP20]], i64 0
 ; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -274,7 +260,7 @@ define void @udiv_urem_feeding_gep(i64 %x, ptr %dst, i64 %N) {
 ; CHECK-NEXT: [[TMP37:%.*]] = ashr i64 [[TMP36]], 32
 ; CHECK-NEXT: [[TMP38:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP37]]
 ; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP23]], ptr [[TMP38]], i32 4, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP14]])
 ; CHECK-NEXT: [[TMP47:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT4]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll b/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll
index ab008e7..6a592ed 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll
@@ -17,16 +17,14 @@ define void @f1(ptr %A) #0 {
 ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 4 x i32> splat (i32 1), ptr [[TMP6]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: store <vscale x 4 x i32> splat (i32 1), ptr [[TMP4]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/gather-do-not-vectorize-addressing.ll b/llvm/test/Transforms/LoopVectorize/AArch64/gather-do-not-vectorize-addressing.ll
index 596a2ed..a8d0b37 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/gather-do-not-vectorize-addressing.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/gather-do-not-vectorize-addressing.ll
@@ -80,8 +80,6 @@ define dso_local double @test(ptr nocapture noundef readonly %data, ptr nocaptur
 ; SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
 ; SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
 ; SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; SVE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SVE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
 ; SVE-NEXT: br label [[VECTOR_BODY:%.*]]
 ; SVE: vector.body:
 ; SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -92,7 +90,7 @@ define dso_local double @test(ptr nocapture noundef readonly %data, ptr nocaptur
 ; SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[DATA:%.*]], <vscale x 2 x i64> [[TMP7]]
 ; SVE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[TMP8]], i32 8, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x double> poison)
 ; SVE-NEXT: [[TMP9]] = fadd <vscale x 2 x double> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
 ; SVE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
[[N_VEC]] ; SVE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SVE: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll index 8b354d9..3e417a0 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll @@ -28,8 +28,6 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 { ; DEFAULT-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 16 ; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP10]] ; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; DEFAULT-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 16 ; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[X]], i64 0 ; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer ; DEFAULT-NEXT: [[TMP13:%.*]] = trunc <vscale x 8 x i32> [[BROADCAST_SPLAT]] to <vscale x 8 x i16> @@ -60,7 +58,7 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 { ; DEFAULT-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP38]], i64 [[TMP42]] ; DEFAULT-NEXT: store <vscale x 8 x i8> [[TMP36]], ptr [[TMP38]], align 1 ; DEFAULT-NEXT: store <vscale x 8 x i8> [[TMP37]], ptr [[TMP43]], align 1 -; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] ; DEFAULT-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; DEFAULT-NEXT: br i1 [[TMP44]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: @@ -103,12 +101,6 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 { ; PRED: [[VECTOR_PH]]: ; PRED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; PRED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16 -; PRED-NEXT: [[TMP8:%.*]] = sub i64 [[TMP5]], 1 -; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP8]] -; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; PRED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 16 ; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[X]], i64 0 ; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer ; PRED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() @@ -132,7 +124,7 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 { ; PRED-NEXT: [[TMP23:%.*]] = trunc <vscale x 16 x i16> [[TMP21]] to <vscale x 16 x i8> ; PRED-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] ; PRED-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP23]], ptr [[TMP26]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]]) -; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]] +; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]] ; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP15]]) ; PRED-NEXT: [[TMP28:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; PRED-NEXT: [[TMP29:%.*]] = extractelement <vscale x 
16 x i1> [[TMP28]], i32 0 @@ -270,9 +262,6 @@ define void @iv_trunc(i32 %x, ptr %dst, i64 %N) #0 { ; PRED-NEXT: [[TMP12:%.*]] = or i1 [[TMP8]], [[TMP11]] ; PRED-NEXT: br i1 [[TMP12]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; PRED: [[VECTOR_PH]]: -; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 1 -; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 -; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; PRED-NEXT: [[TMP13:%.*]] = sub i64 [[TMP0]], 2 ; PRED-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[TMP0]], 2 ; PRED-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i64 [[TMP13]], i64 0 @@ -441,9 +430,6 @@ define void @trunc_ivs_and_store(i32 %x, ptr %dst, i64 %N) #0 { ; PRED-NEXT: [[TMP13:%.*]] = or i1 [[TMP9]], [[TMP12]] ; PRED-NEXT: br i1 [[TMP13]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; PRED: [[VECTOR_PH]]: -; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 3 -; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 -; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; PRED-NEXT: [[TMP14:%.*]] = sub i64 [[TMP0]], 4 ; PRED-NEXT: [[TMP15:%.*]] = icmp ugt i64 [[TMP0]], 4 ; PRED-NEXT: [[TMP16:%.*]] = select i1 [[TMP15]], i64 [[TMP14]], i64 0 @@ -635,9 +621,6 @@ define void @ivs_trunc_and_ext(i32 %x, ptr %dst, i64 %N) #0 { ; PRED-NEXT: [[TMP12:%.*]] = or i1 [[TMP8]], [[TMP11]] ; PRED-NEXT: br i1 [[TMP12]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; PRED: [[VECTOR_PH]]: -; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 3 -; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 -; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; PRED-NEXT: [[TMP13:%.*]] = sub i64 [[TMP0]], 4 ; PRED-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[TMP0]], 4 ; PRED-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i64 [[TMP13]], i64 0 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll index 649be65..1c4b621 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll @@ -55,8 +55,6 @@ define i64 @vector_loop_with_remaining_iterations(ptr %src, ptr noalias %dst, i3 ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 ; CHECK-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 [[N_MOD_VF]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 17, [[TMP19]] -; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[X]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP22:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[BC_MERGE_RDX]], i32 0 @@ -67,7 +65,7 @@ define i64 @vector_loop_with_remaining_iterations(ptr %src, ptr noalias %dst, i3 ; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP26:%.*]] = mul <vscale x 2 x i64> [[TMP25]], splat (i64 1) ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> [[BROADCAST_SPLAT4]], [[TMP26]] -; CHECK-NEXT: [[TMP27:%.*]] = mul i64 1, [[TMP21]] +; CHECK-NEXT: [[TMP27:%.*]] = mul i64 1, [[TMP17]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP27]], i64 0 ; CHECK-NEXT: 
[[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] @@ -84,7 +82,7 @@ define i64 @vector_loop_with_remaining_iterations(ptr %src, ptr noalias %dst, i3 ; CHECK-NEXT: store <vscale x 2 x i8> zeroinitializer, ptr [[TMP32]], align 1 ; CHECK-NEXT: [[TMP34:%.*]] = zext <vscale x 2 x i32> [[TMP31]] to <vscale x 2 x i64> ; CHECK-NEXT: [[TMP35]] = or <vscale x 2 x i64> [[VEC_PHI8]], [[TMP34]] -; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], [[TMP21]] +; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], [[TMP17]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT6]] ; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] @@ -193,8 +191,6 @@ define i64 @main_vector_loop_fixed_with_no_remaining_iterations(ptr %src, ptr no ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 ; CHECK-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 [[N_MOD_VF]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 17, [[TMP19]] -; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[X]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP22:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[BC_MERGE_RDX]], i32 0 @@ -205,7 +201,7 @@ define i64 @main_vector_loop_fixed_with_no_remaining_iterations(ptr %src, ptr no ; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP38:%.*]] = mul <vscale x 2 x i64> [[TMP25]], splat (i64 1) ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> [[BROADCAST_SPLAT4]], [[TMP38]] -; CHECK-NEXT: [[TMP39:%.*]] = mul i64 1, [[TMP21]] +; CHECK-NEXT: [[TMP39:%.*]] = mul i64 1, [[TMP17]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP39]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] @@ -222,7 +218,7 @@ define i64 @main_vector_loop_fixed_with_no_remaining_iterations(ptr %src, ptr no ; CHECK-NEXT: store <vscale x 2 x i8> zeroinitializer, ptr [[TMP32]], align 1 ; CHECK-NEXT: [[TMP34:%.*]] = zext <vscale x 2 x i32> [[TMP31]] to <vscale x 2 x i64> ; CHECK-NEXT: [[TMP35]] = or <vscale x 2 x i64> [[VEC_PHI8]], [[TMP34]] -; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], [[TMP21]] +; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], [[TMP17]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT6]] ; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll index f069347..fa8d17c 
100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll @@ -145,8 +145,6 @@ define i32 @interleave_integer_reduction(ptr %src, i64 %N) { ; INTERLEAVE-4-VLA-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 ; INTERLEAVE-4-VLA-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; INTERLEAVE-4-VLA-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; INTERLEAVE-4-VLA-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; INTERLEAVE-4-VLA-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16 ; INTERLEAVE-4-VLA-NEXT: br label [[VECTOR_BODY:%.*]] ; INTERLEAVE-4-VLA: vector.body: ; INTERLEAVE-4-VLA-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -172,7 +170,7 @@ define i32 @interleave_integer_reduction(ptr %src, i64 %N) { ; INTERLEAVE-4-VLA-NEXT: [[TMP17]] = add <vscale x 4 x i32> [[VEC_PHI1]], [[WIDE_LOAD4]] ; INTERLEAVE-4-VLA-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[VEC_PHI2]], [[WIDE_LOAD5]] ; INTERLEAVE-4-VLA-NEXT: [[TMP19]] = add <vscale x 4 x i32> [[VEC_PHI3]], [[WIDE_LOAD6]] -; INTERLEAVE-4-VLA-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; INTERLEAVE-4-VLA-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; INTERLEAVE-4-VLA-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; INTERLEAVE-4-VLA-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; INTERLEAVE-4-VLA: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll index 5f5d326..a54a404 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll @@ -72,8 +72,6 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef ; CHECK-VS1-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 16 ; CHECK-VS1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP17]] ; CHECK-VS1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]] -; CHECK-VS1-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VS1-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 16 ; CHECK-VS1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV]], i64 0 ; CHECK-VS1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer ; CHECK-VS1-NEXT: br label %[[VECTOR_BODY:.*]] @@ -84,7 +82,7 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef ; CHECK-VS1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP22]], align 1 ; CHECK-VS1-NEXT: [[TMP24:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] ; CHECK-VS1-NEXT: store <vscale x 16 x i8> [[TMP24]], ptr [[TMP22]], align 1 -; CHECK-VS1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]] +; CHECK-VS1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP17]] ; CHECK-VS1-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VS1-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-VS1: [[MIDDLE_BLOCK]]: @@ -103,8 +101,6 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef ; CHECK-VS1-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], 8 ; CHECK-VS1-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[TMP3]], [[TMP29]] ; CHECK-VS1-NEXT: 
[[N_VEC3:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF2]] -; CHECK-VS1-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VS1-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 8 ; CHECK-VS1-NEXT: [[TMP39:%.*]] = add i64 [[TMP0]], [[N_VEC3]] ; CHECK-VS1-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <vscale x 8 x i8> poison, i8 [[CONV]], i64 0 ; CHECK-VS1-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <vscale x 8 x i8> [[BROADCAST_SPLATINSERT7]], <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer @@ -116,7 +112,7 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef ; CHECK-VS1-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x i8>, ptr [[TMP33]], align 1 ; CHECK-VS1-NEXT: [[TMP35:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD6]], [[BROADCAST_SPLAT8]] ; CHECK-VS1-NEXT: store <vscale x 8 x i8> [[TMP35]], ptr [[TMP33]], align 1 -; CHECK-VS1-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX5]], [[TMP31]] +; CHECK-VS1-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX5]], [[TMP29]] ; CHECK-VS1-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC3]] ; CHECK-VS1-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-VS1: [[VEC_EPILOG_MIDDLE_BLOCK]]: @@ -176,8 +172,6 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef ; CHECK-VS2-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 8 ; CHECK-VS2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP17]] ; CHECK-VS2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]] -; CHECK-VS2-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VS2-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 8 ; CHECK-VS2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i8> poison, i8 [[CONV]], i64 0 ; CHECK-VS2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer ; CHECK-VS2-NEXT: br label %[[VECTOR_BODY:.*]] @@ -188,7 +182,7 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef ; CHECK-VS2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP22]], align 1 ; CHECK-VS2-NEXT: [[TMP24:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] ; CHECK-VS2-NEXT: store <vscale x 8 x i8> [[TMP24]], ptr [[TMP22]], align 1 -; CHECK-VS2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]] +; CHECK-VS2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP17]] ; CHECK-VS2-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VS2-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-VS2: [[MIDDLE_BLOCK]]: @@ -207,8 +201,6 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef ; CHECK-VS2-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], 4 ; CHECK-VS2-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[TMP3]], [[TMP29]] ; CHECK-VS2-NEXT: [[N_VEC3:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF2]] -; CHECK-VS2-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VS2-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 4 ; CHECK-VS2-NEXT: [[TMP39:%.*]] = add i64 [[TMP0]], [[N_VEC3]] ; CHECK-VS2-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <vscale x 4 x i8> poison, i8 [[CONV]], i64 0 ; CHECK-VS2-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <vscale x 4 x i8> [[BROADCAST_SPLATINSERT7]], <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer @@ -220,7 +212,7 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 
noundef ; CHECK-VS2-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 4 x i8>, ptr [[TMP33]], align 1 ; CHECK-VS2-NEXT: [[TMP35:%.*]] = add <vscale x 4 x i8> [[WIDE_LOAD6]], [[BROADCAST_SPLAT8]] ; CHECK-VS2-NEXT: store <vscale x 4 x i8> [[TMP35]], ptr [[TMP33]], align 1 -; CHECK-VS2-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX5]], [[TMP31]] +; CHECK-VS2-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX5]], [[TMP29]] ; CHECK-VS2-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC3]] ; CHECK-VS2-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-VS2: [[VEC_EPILOG_MIDDLE_BLOCK]]: @@ -416,12 +408,6 @@ define void @overflow_indvar_known_false(ptr nocapture noundef %p, i32 noundef % ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 -; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], [[TMP4]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 [[TMP1]]) ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer @@ -434,7 +420,7 @@ define void @overflow_indvar_known_false(ptr nocapture noundef %p, i32 noundef % ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP13]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i8> poison) ; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 16 x i8> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP15]], ptr [[TMP13]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX_NEXT]], i64 [[TMP1]]) ; CHECK-NEXT: [[TMP16:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 16 x i1> [[TMP16]], i32 0 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll index 6029095..32a69f1 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll @@ -18,8 +18,6 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]] ; TFNONE: [[VECTOR_BODY]]: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -28,7 +26,7 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[TMP7:%.*]] = 
call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_LOAD]], <vscale x 2 x i1> splat (i1 true)) ; TFNONE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TFNONE-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8 -; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; TFNONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; TFNONE: [[MIDDLE_BLOCK]]: @@ -141,8 +139,6 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]] ; TFNONE: [[VECTOR_BODY]]: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -153,7 +149,7 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP7]], <vscale x 2 x i64> [[TMP8]], <vscale x 2 x i64> zeroinitializer ; TFNONE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]] ; TFNONE-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP10]], align 8 -; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; TFNONE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; TFNONE: [[MIDDLE_BLOCK]]: @@ -168,10 +164,10 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP12]], 50 ; TFNONE-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END]] ; TFNONE: [[IF_THEN]]: -; TFNONE-NEXT: [[TMP13:%.*]] = call i64 @foo(i64 [[TMP12]]) #[[ATTR4]] +; TFNONE-NEXT: [[TMP9:%.*]] = call i64 @foo(i64 [[TMP12]]) #[[ATTR4]] ; TFNONE-NEXT: br label %[[IF_END]] ; TFNONE: [[IF_END]]: -; TFNONE-NEXT: [[TMP14:%.*]] = phi i64 [ [[TMP13]], %[[IF_THEN]] ], [ 0, %[[FOR_BODY]] ] +; TFNONE-NEXT: [[TMP14:%.*]] = phi i64 [ [[TMP9]], %[[IF_THEN]] ], [ 0, %[[FOR_BODY]] ] ; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: store i64 [[TMP14]], ptr [[ARRAYIDX1]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -292,8 +288,6 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]] ; TFNONE: [[VECTOR_BODY]]: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -306,7 +300,7 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP7]], <vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[TMP9]] ; TFNONE-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 
[[INDEX]] ; TFNONE-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP11]], align 8 -; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; TFNONE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TFNONE: [[MIDDLE_BLOCK]]: @@ -460,8 +454,6 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]] ; TFNONE: [[VECTOR_BODY]]: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -470,7 +462,7 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]]) ; TFNONE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TFNONE-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8 -; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; TFNONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TFNONE: [[MIDDLE_BLOCK]]: @@ -515,8 +507,6 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 { ; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; TFFALLBACK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; TFFALLBACK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; TFFALLBACK-NEXT: br label %[[VECTOR_BODY:.*]] ; TFFALLBACK: [[VECTOR_BODY]]: ; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -525,7 +515,7 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 { ; TFFALLBACK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]]) ; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TFFALLBACK-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8 -; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; TFFALLBACK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFFALLBACK-NEXT: br i1 [[TMP9]], label %[[FOR_BODY:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TFFALLBACK: [[FOR_BODY]]: @@ -589,8 +579,6 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]] ; TFNONE: [[VECTOR_BODY]]: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -599,7 +587,7 @@ define void @test_widen_optmask(ptr 
noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]]) ; TFNONE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TFNONE-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8 -; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; TFNONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; TFNONE: [[MIDDLE_BLOCK]]: @@ -737,8 +725,6 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub ; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; TFNONE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[M]], i64 0 ; TFNONE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[BROADCAST_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer ; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]] @@ -753,7 +739,7 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub ; TFNONE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TFNONE-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP10]], align 8 ; TFNONE-NEXT: [[TMP11]] = call double @llvm.vector.reduce.fadd.nxv2f64(double [[VEC_PHI]], <vscale x 2 x double> [[TMP7]]) -; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; TFNONE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; TFNONE: [[MIDDLE_BLOCK]]: @@ -926,8 +912,6 @@ define void @test_widen_exp_v2(ptr noalias %p2, ptr noalias %p, i64 %n) #5 { ; TFNONE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; TFNONE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; TFNONE-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2 ; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]] ; TFNONE: [[VECTOR_BODY]]: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -942,7 +926,7 @@ define void @test_widen_exp_v2(ptr noalias %p2, ptr noalias %p, i64 %n) #5 { ; TFNONE-NEXT: [[TMP13:%.*]] = sub i32 [[TMP12]], 1 ; TFNONE-NEXT: [[TMP14:%.*]] = extractelement <vscale x 2 x double> [[PREDPHI]], i32 [[TMP13]] ; TFNONE-NEXT: store double [[TMP14]], ptr [[P]], align 8 -; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; TFNONE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; TFNONE: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll index cc36cdb..3ab7171 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll +++ 
b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll @@ -495,12 +495,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; DEFAULT: [[VECTOR_PH]]: ; DEFAULT-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; DEFAULT-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 -; DEFAULT-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; DEFAULT-NEXT: [[N_RND_UP:%.*]] = add i64 15, [[TMP2]] -; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; DEFAULT-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 ; DEFAULT-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; DEFAULT-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 ; DEFAULT-NEXT: [[TMP7:%.*]] = sub i64 15, [[TMP6]] @@ -516,7 +510,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; DEFAULT-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8() ; DEFAULT-NEXT: [[TMP11:%.*]] = mul <vscale x 16 x i8> [[TMP10]], splat (i8 1) ; DEFAULT-NEXT: [[INDUCTION:%.*]] = add <vscale x 16 x i8> zeroinitializer, [[TMP11]] -; DEFAULT-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP4]] to i8 +; DEFAULT-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP1]] to i8 ; DEFAULT-NEXT: [[TMP13:%.*]] = mul i8 1, [[TMP12]] ; DEFAULT-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[TMP13]], i64 0 ; DEFAULT-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer @@ -534,7 +528,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; DEFAULT-NEXT: [[TMP21:%.*]] = add <vscale x 16 x i8> [[TMP18]], [[TMP20]] ; DEFAULT-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] ; DEFAULT-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP21]], ptr [[TMP22]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]]) -; DEFAULT-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; DEFAULT-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] ; DEFAULT-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; DEFAULT-NEXT: [[TMP24:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; DEFAULT-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i8> [[VEC_IND]], [[DOTSPLAT]] @@ -570,12 +564,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; OPTSIZE: [[VECTOR_PH]]: ; OPTSIZE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; OPTSIZE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 -; OPTSIZE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; OPTSIZE-NEXT: [[N_RND_UP:%.*]] = add i64 15, [[TMP2]] -; OPTSIZE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; OPTSIZE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; OPTSIZE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; OPTSIZE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 ; OPTSIZE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; OPTSIZE-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 ; OPTSIZE-NEXT: [[TMP7:%.*]] = sub i64 15, [[TMP6]] @@ -591,7 +579,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; OPTSIZE-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8() ; OPTSIZE-NEXT: [[TMP11:%.*]] = mul <vscale x 16 x i8> [[TMP10]], splat (i8 1) ; OPTSIZE-NEXT: [[INDUCTION:%.*]] = add <vscale x 16 x i8> 
zeroinitializer, [[TMP11]] -; OPTSIZE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP4]] to i8 +; OPTSIZE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP1]] to i8 ; OPTSIZE-NEXT: [[TMP13:%.*]] = mul i8 1, [[TMP12]] ; OPTSIZE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[TMP13]], i64 0 ; OPTSIZE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer @@ -609,7 +597,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; OPTSIZE-NEXT: [[TMP21:%.*]] = add <vscale x 16 x i8> [[TMP18]], [[TMP20]] ; OPTSIZE-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] ; OPTSIZE-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP21]], ptr [[TMP22]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]]) -; OPTSIZE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; OPTSIZE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] ; OPTSIZE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; OPTSIZE-NEXT: [[TMP24:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; OPTSIZE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i8> [[VEC_IND]], [[DOTSPLAT]] @@ -645,12 +633,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; MINSIZE: [[VECTOR_PH]]: ; MINSIZE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; MINSIZE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 -; MINSIZE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; MINSIZE-NEXT: [[N_RND_UP:%.*]] = add i64 15, [[TMP2]] -; MINSIZE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; MINSIZE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; MINSIZE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; MINSIZE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 ; MINSIZE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; MINSIZE-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 ; MINSIZE-NEXT: [[TMP7:%.*]] = sub i64 15, [[TMP6]] @@ -666,7 +648,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; MINSIZE-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8() ; MINSIZE-NEXT: [[TMP11:%.*]] = mul <vscale x 16 x i8> [[TMP10]], splat (i8 1) ; MINSIZE-NEXT: [[INDUCTION:%.*]] = add <vscale x 16 x i8> zeroinitializer, [[TMP11]] -; MINSIZE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP4]] to i8 +; MINSIZE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP1]] to i8 ; MINSIZE-NEXT: [[TMP13:%.*]] = mul i8 1, [[TMP12]] ; MINSIZE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[TMP13]], i64 0 ; MINSIZE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer @@ -684,7 +666,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; MINSIZE-NEXT: [[TMP21:%.*]] = add <vscale x 16 x i8> [[TMP18]], [[TMP20]] ; MINSIZE-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] ; MINSIZE-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP21]], ptr [[TMP22]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]]) -; MINSIZE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; MINSIZE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] ; MINSIZE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; MINSIZE-NEXT: [[TMP24:%.*]] = xor 
<vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; MINSIZE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i8> [[VEC_IND]], [[DOTSPLAT]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll index 2cec6ca..f284afc 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll @@ -18,38 +18,36 @@ define void @foo() { ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 1) -; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]] -; CHECK-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() +; CHECK-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 1) +; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP5]] +; CHECK-NEXT: [[TMP6:%.*]] = mul i64 1, [[TMP3]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP6]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_LATCH:%.*]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_LATCH]] ] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1024 x float], ptr @A, i64 0, <vscale x 4 x i64> [[VEC_IND]] -; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> [[TMP9]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> poison) +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x float], ptr @A, i64 0, <vscale x 4 x i64> [[VEC_IND]] +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> [[TMP7]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> poison) ; CHECK-NEXT: br label [[INNER_LOOP1:%.*]] ; CHECK: inner_loop1: -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i64> [ zeroinitializer, [[VECTOR_BODY]] ], [ [[TMP12:%.*]], [[INNER_LOOP1]] ] -; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 4 x float> [ [[WIDE_MASKED_GATHER]], [[VECTOR_BODY]] ], [ [[TMP11:%.*]], [[INNER_LOOP1]] ] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [512 x float], ptr @B, i64 0, <vscale x 4 x i64> [[VEC_PHI]] -; CHECK-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> [[TMP10]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> poison) -; CHECK-NEXT: [[TMP11]] = fmul 
<vscale x 4 x float> [[VEC_PHI2]], [[WIDE_MASKED_GATHER3]] -; CHECK-NEXT: [[TMP12]] = add nuw nsw <vscale x 4 x i64> [[VEC_PHI]], splat (i64 1) -; CHECK-NEXT: [[TMP13:%.*]] = icmp eq <vscale x 4 x i64> [[TMP12]], splat (i64 512) -; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[TMP13]], i32 0 -; CHECK-NEXT: br i1 [[TMP14]], label [[VECTOR_LATCH]], label [[INNER_LOOP1]] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i64> [ zeroinitializer, [[VECTOR_BODY]] ], [ [[TMP10:%.*]], [[INNER_LOOP1]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 4 x float> [ [[WIDE_MASKED_GATHER]], [[VECTOR_BODY]] ], [ [[TMP9:%.*]], [[INNER_LOOP1]] ] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [512 x float], ptr @B, i64 0, <vscale x 4 x i64> [[VEC_PHI]] +; CHECK-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> [[TMP8]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> poison) +; CHECK-NEXT: [[TMP9]] = fmul <vscale x 4 x float> [[VEC_PHI2]], [[WIDE_MASKED_GATHER3]] +; CHECK-NEXT: [[TMP10]] = add nuw nsw <vscale x 4 x i64> [[VEC_PHI]], splat (i64 1) +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq <vscale x 4 x i64> [[TMP10]], splat (i64 512) +; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 4 x i1> [[TMP11]], i32 0 +; CHECK-NEXT: br i1 [[TMP12]], label [[VECTOR_LATCH]], label [[INNER_LOOP1]] ; CHECK: vector.latch: -; CHECK-NEXT: [[VEC_PHI4:%.*]] = phi <vscale x 4 x float> [ [[TMP11]], [[INNER_LOOP1]] ] -; CHECK-NEXT: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VEC_PHI4]], <vscale x 4 x ptr> [[TMP9]], i32 4, <vscale x 4 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[VEC_PHI4:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[INNER_LOOP1]] ] +; CHECK-NEXT: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VEC_PHI4]], <vscale x 4 x ptr> [[TMP7]], i32 4, <vscale x 4 x i1> splat (i1 true)) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll index 787d63c..7232fe5 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll @@ -61,8 +61,6 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] -; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; 
CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -80,7 +78,7 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]] ; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]] ; CHECK-SVE-NEXT: [[TMP19]] = sub <vscale x 4 x i32> [[TMP17]], [[TMP18]] -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-SVE: middle.block: @@ -104,8 +102,6 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 ; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] -; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE-MAXBW: vector.body: ; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -124,7 +120,7 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]] ; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP17]] ; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP18]]) -; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-SVE-MAXBW: middle.block: @@ -217,8 +213,6 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] -; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -236,7 +230,7 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]] ; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]] ; CHECK-SVE-NEXT: [[TMP19]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]] -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-SVE: middle.block: @@ -260,8 +254,6 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 ; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] -; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE-MAXBW: vector.body: ; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -279,7 +271,7 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]]) ; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]] ; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP17]]) -; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-SVE-MAXBW: middle.block: @@ -373,8 +365,6 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] -; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -392,7 +382,7 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[TMP17:%.*]] = sub <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]] ; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]] ; CHECK-SVE-NEXT: [[TMP19]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]] -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-SVE: middle.block: @@ -416,8 +406,6 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 ; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] -; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE-MAXBW: vector.body: ; CHECK-SVE-MAXBW-NEXT: 
[[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -436,7 +424,7 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP17]])
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP18]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -533,8 +521,6 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -552,7 +538,7 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP17:%.*]] = sub <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
; CHECK-SVE-NEXT: [[TMP19]] = sub <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -576,8 +562,6 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -597,7 +581,7 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP18]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP19]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -695,8 +679,6 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -716,7 +698,7 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP19:%.*]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]]
; CHECK-SVE-NEXT: [[TMP20:%.*]] = mul nsw <vscale x 4 x i32> [[TMP14]], [[TMP15]]
; CHECK-SVE-NEXT: [[TMP21]] = add <vscale x 4 x i32> [[TMP19]], [[TMP20]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -740,8 +722,6 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -761,7 +741,7 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP17]])
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP15]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE4]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]], <vscale x 8 x i32> [[TMP18]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -863,8 +843,6 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -884,7 +862,7 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP19:%.*]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]]
; CHECK-SVE-NEXT: [[TMP20:%.*]] = mul nsw <vscale x 4 x i32> [[TMP14]], [[TMP15]]
; CHECK-SVE-NEXT: [[TMP21]] = sub <vscale x 4 x i32> [[TMP19]], [[TMP20]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -908,8 +886,6 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -931,7 +907,7 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP15]]
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP19]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE4]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]], <vscale x 8 x i32> [[TMP20]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -1029,8 +1005,6 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1047,7 +1021,7 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
; CHECK-SVE-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[TMP17]], [[TMP15]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -1071,8 +1045,6 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1089,7 +1061,7 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]])
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP15]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -1177,8 +1149,6 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1191,7 +1161,7 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP12:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; CHECK-SVE-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP11]]
; CHECK-SVE-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[TMP12]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -1215,8 +1185,6 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1229,7 +1197,7 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP11]])
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE2]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP12]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -1318,8 +1286,6 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1336,7 +1302,7 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP15]]
; CHECK-SVE-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[TMP16]], [[TMP17]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -1360,8 +1326,6 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1378,7 +1342,7 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP15]])
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP16]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
index d01effd..368cb18e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
@@ -17,8 +17,6 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -44,7 +42,7 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -65,8 +63,6 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-NOI8MM-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NOI8MM-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NOI8MM-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NOI8MM: vector.body:
; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -92,7 +88,7 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
-; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-NOI8MM: middle.block:
@@ -136,8 +132,6 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -163,7 +157,7 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
@@ -184,8 +178,6 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-NOI8MM-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NOI8MM-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NOI8MM-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NOI8MM: vector.body:
; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -211,7 +203,7 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
-; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-NOI8MM: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
index 195101f..0f0713d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
@@ -18,8 +18,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ]
@@ -32,7 +30,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP18]], [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -52,8 +50,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ]
@@ -79,7 +75,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul <vscale x 4 x i32> [[TMP29]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add <vscale x 4 x i32> [[TMP30]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -100,8 +96,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -114,7 +108,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = mul <vscale x 8 x i32> [[TMP20]], [[TMP13]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP22]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -157,8 +151,6 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP9]], 2
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP18]], 2
; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -173,7 +165,7 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD2]] to <vscale x 2 x i64>
; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP13]], [[TMP11]]
; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[TMP14]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -193,8 +185,6 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP9]], 4
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP16]]
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP28]], 4
; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP10]]
; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP10]]
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -222,7 +212,7 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP21]], [[TMP15]]
; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 2 x i64> [[TMP22]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[TMP23]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -243,8 +233,6 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -259,7 +247,7 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i64>
; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP13]], [[TMP11]]
; CHECK-MAXBW-NEXT: [[TMP15]] = add <vscale x 8 x i64> [[TMP14]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -304,8 +292,6 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP14]], 2
; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = mul i64 [[TMP10]], 2
; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP20]]
; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = mul i64 [[TMP10]], 2
@@ -324,7 +310,7 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD3]] to <vscale x 2 x i64>
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP15]], [[TMP13]]
; CHECK-INTERLEAVE1-NEXT: [[TMP17]] = add <vscale x 2 x i64> [[TMP16]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -344,8 +330,6 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP11]]
; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP12]], 4
; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP15]], 2
; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP6]]
; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul i64 [[TMP15]], 2
@@ -377,7 +361,7 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP23]], [[TMP17]]
; CHECK-INTERLEAVED-NEXT: [[TMP26]] = add <vscale x 2 x i64> [[TMP24]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP27]] = add <vscale x 2 x i64> [[TMP25]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP15]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -398,8 +382,6 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul i64 [[N_VEC]], 2
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP6]]
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = mul i64 [[N_VEC]], 2
@@ -418,7 +400,7 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = zext <vscale x 4 x i16> [[WIDE_LOAD3]] to <vscale x 4 x i64>
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = mul nuw nsw <vscale x 4 x i64> [[TMP15]], [[TMP13]]
; CHECK-MAXBW-NEXT: [[TMP17]] = add <vscale x 4 x i64> [[TMP16]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -819,8 +801,6 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 8
; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
@@ -838,7 +818,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP16]] = mul <vscale x 8 x i32> [[TMP15]], [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> [[VECTOR_RECUR]], <vscale x 8 x i32> [[TMP16]], i32 -1)
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP17]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -865,8 +845,6 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -890,7 +868,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul <vscale x 8 x i32> [[TMP23]], [[TMP16]]
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> [[TMP24]], <vscale x 8 x i32> [[TMP25]], i32 -1)
; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -917,8 +895,6 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 8
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
@@ -936,7 +912,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP25]] = mul <vscale x 8 x i32> [[TMP23]], [[TMP16]]
; CHECK-MAXBW-NEXT: [[TMP26:%.*]] = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> [[VECTOR_RECUR]], <vscale x 8 x i32> [[TMP25]], i32 -1)
; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -986,8 +962,6 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -999,7 +973,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul <vscale x 8 x i32> [[TMP15]], [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP15]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -1026,8 +1000,6 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1045,7 +1017,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul <vscale x 8 x i32> [[TMP22]], [[TMP15]]
; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP30]], [[TMP22]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -1072,8 +1044,6 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1085,7 +1055,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP14]]
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP20]], [[TMP19]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -1136,8 +1106,6 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP16]], 4
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP18]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = mul nuw i64 [[TMP20]], 4
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1180,7 +1148,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD10]] to <vscale x 4 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP40:%.*]] = mul nsw <vscale x 4 x i32> [[TMP37]], [[TMP39]]
; CHECK-INTERLEAVE1-NEXT: [[TMP41]] = add <vscale x 4 x i32> [[TMP40]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP26]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -1204,8 +1172,6 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP16]], 8
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP18]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = mul nuw i64 [[TMP34]], 8
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1300,7 +1266,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP79:%.*]] = mul nsw <vscale x 4 x i32> [[TMP71]], [[TMP77]]
; CHECK-INTERLEAVED-NEXT: [[TMP80]] = add <vscale x 4 x i32> [[TMP78]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP81]] = add <vscale x 4 x i32> [[TMP79]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP40]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -1328,8 +1294,6 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1372,7 +1336,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD26]] to <vscale x 8 x i32>
; CHECK-MAXBW-NEXT: [[TMP73:%.*]] = mul nsw <vscale x 8 x i32> [[TMP65]], [[TMP71]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE16]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI4]], <vscale x 8 x i32> [[TMP73]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP74:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -1445,12 +1409,6 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: entry:
; CHECK-INTERLEAVE1-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVE1: vector.ph:
-; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-INTERLEAVE1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -1488,12 +1446,6 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: entry:
; CHECK-INTERLEAVED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVED: vector.ph:
-; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-INTERLEAVED-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -1531,12 +1483,6 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: entry:
; CHECK-MAXBW-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
-; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-MAXBW-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -1603,8 +1549,6 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP8]], 4
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP11]], 4
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1617,7 +1561,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP12]], [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -1641,8 +1585,6 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP7]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP14]], 8
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1668,7 +1610,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul <vscale x 4 x i32> [[TMP20]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add <vscale x 4 x i32> [[TMP21]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -1693,8 +1635,6 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1707,7 +1647,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = mul <vscale x 8 x i32> [[TMP20]], [[TMP13]]
; CHECK-MAXBW-NEXT: [[TMP24]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -1755,8 +1695,6 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1770,7 +1708,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD1]] to <vscale x 2 x i64>
; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP13]], [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP14]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -1791,8 +1729,6 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1819,7 +1755,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP21]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP22]]
; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[VEC_PHI1]], [[TMP23]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -1841,8 +1777,6 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1853,14 +1787,14 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[INDEX]], 1
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP10]]
; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x i8>, ptr [[TMP11]], align 1
-; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP12]], [[TMP9]]
-; CHECK-MAXBW-NEXT: [[TMP14]] = add <vscale x 8 x i64> [[VEC_PHI]], [[TMP15]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i64>
+; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP8]], [[TMP9]]
+; CHECK-MAXBW-NEXT: [[TMP14]] = add <vscale x 8 x i64> [[VEC_PHI]], [[TMP13]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP14]])
+; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP14]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK-MAXBW: scalar.ph:
@@ -2150,8 +2084,6 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[EXT_B]], i64 0
; CHECK-MAXBW-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -2163,7 +2095,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i16> [[WIDE_LOAD]] to <vscale x 4 x i64>
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = mul nuw nsw <vscale x 4 x i64> [[TMP9]], [[BROADCAST_SPLAT]]
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -2285,8 +2217,6 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[EXT_B]], i64 0
; CHECK-MAXBW-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -2298,7 +2228,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i16> [[WIDE_LOAD]] to <vscale x 4 x i64>
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = mul nuw nsw <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP9]]
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -2349,8 +2279,6 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
@@ -2367,7 +2295,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD2]] to <vscale x 2 x i64>
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP16]], [[TMP14]]
; CHECK-INTERLEAVE1-NEXT: [[TMP18]] = add <vscale x 2 x i64> [[TMP17]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -2392,8 +2320,6 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
@@ -2423,7 +2349,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP24]], [[TMP18]]
; CHECK-INTERLEAVED-NEXT: [[TMP27]] = add <vscale x 2 x i64> [[TMP25]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP28]] = add <vscale x 2 x i64> [[TMP26]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -2449,8 +2375,6 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
@@ -2467,7 +2391,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i64>
; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP16]], [[TMP14]]
; CHECK-MAXBW-NEXT: [[TMP20]] = add <vscale x 8 x i64> [[TMP17]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK-MAXBW: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
index a46340c..17da2af 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
@@ -18,8 +18,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -33,7 +31,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP12]], [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[TMP13]]
; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP14]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -53,8 +51,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -82,7 +78,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[TMP22]]
; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP23]]
; CHECK-INTERLEAVED-NEXT: [[TMP26]] = add <vscale x 4 x i32> [[VEC_PHI1]], [[TMP24]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -103,8 +99,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
@@ -118,7 +112,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i32> [[TMP12]], [[TMP9]]
; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP13]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP14]])
-; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-MAXBW: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
index d2c03d1..025a826 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
@@ -68,8 +68,6 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -78,7 +76,7 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP6]], align 1
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP8]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -254,8 +252,6 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -264,7 +260,7 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 {
;
CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1 ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i64> ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i64> @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv16i64(<vscale x 2 x i64> [[VEC_PHI]], <vscale x 16 x i64> [[TMP9]]) -; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-MAXBW: middle.block: @@ -352,8 +348,6 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 { ; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 ; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-MAXBW: vector.body: ; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -362,7 +356,7 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 { ; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP7]], align 2 ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i64> ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i64> @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv8i64(<vscale x 2 x i64> [[VEC_PHI]], <vscale x 8 x i64> [[TMP9]]) -; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-MAXBW: middle.block: @@ -450,8 +444,6 @@ define i32 @zext_add_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 ; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16 ; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-MAXBW: vector.body: ; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -460,7 +452,7 @@ define i32 @zext_add_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1 ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32> ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]]) -; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-MAXBW: middle.block: @@ -496,12 +488,6 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr 
%a) #0 { ; CHECK-INTERLEAVE1: vector.ph: ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-INTERLEAVE1-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]] -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1025) ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: @@ -513,7 +499,7 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_MASKED_LOAD]] to <vscale x 4 x i32> ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> [[TMP9]], <vscale x 4 x i32> [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] ; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = extractelement <vscale x 4 x i1> [[TMP11]], i32 0 @@ -530,12 +516,6 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-INTERLEAVED: vector.ph: ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-INTERLEAVED-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]] -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1025) ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: @@ -547,7 +527,7 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_MASKED_LOAD]] to <vscale x 4 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[VEC_PHI]] ; CHECK-INTERLEAVED-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> [[TMP9]], <vscale x 4 x i32> [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] ; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = extractelement <vscale x 4 x i1> [[TMP11]], i32 0 @@ -564,12 +544,6 @@ define 
i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-MAXBW: vector.ph: ; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 -; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-MAXBW-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]] -; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 ; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 1025) ; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-MAXBW: vector.body: @@ -581,7 +555,7 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = zext <vscale x 16 x i8> [[WIDE_MASKED_LOAD]] to <vscale x 16 x i32> ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i32> [[TMP8]], <vscale x 16 x i32> zeroinitializer ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]]) -; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] ; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <vscale x 16 x i1> [[TMP10]], i32 0 @@ -752,8 +726,6 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 ; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-MAXBW: vector.body: ; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -762,7 +734,7 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1 ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32> ; CHECK-MAXBW-NEXT: [[TMP10]] = sub <vscale x 8 x i32> [[VEC_PHI]], [[TMP9]] -; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-MAXBW-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-MAXBW: middle.block: @@ -850,8 +822,6 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 ; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16 ; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-MAXBW: vector.body: ; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi 
i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -860,7 +830,7 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1 ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32> ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]]) -; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-MAXBW: middle.block: @@ -970,8 +940,6 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 16 ; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]] -; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul nuw i32 [[TMP5]], 16 ; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = add i32 [[D]], [[N_VEC]] ; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = insertelement <vscale x 16 x i32> zeroinitializer, i32 [[A]], i32 0 ; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV1]], i64 0 @@ -984,7 +952,7 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] ; CHECK-MAXBW-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP9]], align 1 ; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 16 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]] -; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP6]] +; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-MAXBW: middle.block: @@ -1093,8 +1061,6 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 16 ; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]] -; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul nuw i32 [[TMP5]], 16 ; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[C]], i64 0 ; CHECK-MAXBW-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer ; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = add i32 [[D]], [[N_VEC]] @@ -1108,7 +1074,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] ; CHECK-MAXBW-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP10]], align 1 ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]]) -; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw 
i32 [[INDEX]], [[TMP6]] +; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-MAXBW: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll index b4df63d..5b0696e 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll @@ -12,13 +12,13 @@ define void @cost_hoisted_vector_code(ptr %p, float %arg) { ; CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> zeroinitializer) ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[INDEX:%.*]] = add i64 1, [[INDEX1]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[TMP8]], i32 4 -; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP8]], align 4 +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]] +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr float, ptr [[P]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[TMP1]], i32 4 +; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP1]], align 4 ; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP2]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], 8 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], -8 ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll index 6d5bbde..3b43d52 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll @@ -16,8 +16,6 @@ define void @test_invar_gep(ptr %dst) #0 { ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -36,7 +34,7 @@ define void @test_invar_gep(ptr %dst) #0 { ; CHECK-NEXT: [[TMP17:%.*]] = sub i32 [[TMP16]], 1 ; CHECK-NEXT: [[TMP18:%.*]] = extractelement <vscale x 4 x i64> [[TMP9]], i32 [[TMP17]] ; CHECK-NEXT: store i64 [[TMP18]], ptr [[TMP14:%.*]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: 
middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll index 830e7da..b732f88 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll @@ -12,9 +12,6 @@ define i32 @pr70988(ptr %src, i32 %n) { ; CHECK-NEXT: [[UMAX:%.*]] = zext i32 [[TMP2]] to i64 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], 1 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = icmp ult i64 0, [[UMAX]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = icmp ult i64 1, [[UMAX]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll index 381d2e1..dcaaa89 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll @@ -59,8 +59,6 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; VSCALEFORTUNING2-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 ; VSCALEFORTUNING2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; VSCALEFORTUNING2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; VSCALEFORTUNING2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; VSCALEFORTUNING2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8 ; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z]], i64 0 ; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0 @@ -121,7 +119,7 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; VSCALEFORTUNING2-NEXT: [[TMP46:%.*]] = or <vscale x 4 x i32> [[WIDE_MASKED_GATHER8]], [[VEC_PHI5]] ; VSCALEFORTUNING2-NEXT: [[TMP47]] = or <vscale x 4 x i32> [[TMP45]], [[WIDE_MASKED_GATHER9]] ; VSCALEFORTUNING2-NEXT: [[TMP48]] = or <vscale x 4 x i32> [[TMP46]], [[WIDE_MASKED_GATHER10]] -; VSCALEFORTUNING2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; VSCALEFORTUNING2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; VSCALEFORTUNING2-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; VSCALEFORTUNING2-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VSCALEFORTUNING2: [[MIDDLE_BLOCK]]: @@ -182,12 +180,6 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; PRED: [[VECTOR_PH]]: ; PRED-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() ; PRED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4 -; PRED-NEXT: [[TMP3:%.*]] = sub i64 [[TMP2]], 1 -; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP3]] -; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP2]] -; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; PRED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z]], i64 0 ; PRED-NEXT: 
[[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; PRED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0 @@ -218,7 +210,7 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; PRED-NEXT: [[VECTOR_RECUR_INIT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 0, i32 [[TMP27]] ; PRED-NEXT: br label %[[VECTOR_BODY:.*]] ; PRED: [[VECTOR_BODY]]: -; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], %[[VECTOR_BODY]] ] +; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; PRED-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[BROADCAST_SPLAT6:%.*]], %[[VECTOR_BODY]] ] ; PRED-NEXT: [[VECTOR_RECUR4:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT3]], %[[VECTOR_PH]] ], [ [[TMP29:%.*]], %[[VECTOR_BODY]] ] @@ -241,7 +233,7 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; PRED-NEXT: [[TMP39:%.*]] = or <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]] ; PRED-NEXT: [[TMP40:%.*]] = or <vscale x 4 x i32> [[TMP39]], [[WIDE_MASKED_GATHER7]] ; PRED-NEXT: [[TMP41]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> [[TMP40]], <vscale x 4 x i32> [[VEC_PHI]] -; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[TMP5]] +; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[IV]], [[TMP2]] ; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[IV]], i64 [[TMP10]]) ; PRED-NEXT: [[TMP42:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; PRED-NEXT: [[TMP43:%.*]] = extractelement <vscale x 4 x i1> [[TMP42]], i32 0 @@ -343,8 +335,6 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; DEFAULT-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 ; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; DEFAULT-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8 ; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i16> poison, i16 [[X]], i64 0 ; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer ; DEFAULT-NEXT: br label %[[VECTOR_BODY:.*]] @@ -362,7 +352,7 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; DEFAULT-NEXT: [[TMP20:%.*]] = udiv <vscale x 4 x i16> [[WIDE_LOAD2]], [[BROADCAST_SPLAT]] ; DEFAULT-NEXT: [[TMP21]] = or <vscale x 4 x i16> [[TMP19]], [[VEC_PHI]] ; DEFAULT-NEXT: [[TMP22]] = or <vscale x 4 x i16> [[TMP20]], [[VEC_PHI1]] -; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; DEFAULT-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; DEFAULT-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: @@ -401,8 +391,6 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; VSCALEFORTUNING2-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 ; VSCALEFORTUNING2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; 
VSCALEFORTUNING2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; VSCALEFORTUNING2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; VSCALEFORTUNING2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8 ; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i16> poison, i16 [[X]], i64 0 ; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer ; VSCALEFORTUNING2-NEXT: br label %[[VECTOR_BODY:.*]] @@ -420,7 +408,7 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; VSCALEFORTUNING2-NEXT: [[TMP14:%.*]] = udiv <vscale x 4 x i16> [[WIDE_LOAD2]], [[BROADCAST_SPLAT]] ; VSCALEFORTUNING2-NEXT: [[TMP15]] = or <vscale x 4 x i16> [[TMP13]], [[VEC_PHI]] ; VSCALEFORTUNING2-NEXT: [[TMP16]] = or <vscale x 4 x i16> [[TMP14]], [[VEC_PHI1]] -; VSCALEFORTUNING2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; VSCALEFORTUNING2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; VSCALEFORTUNING2-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; VSCALEFORTUNING2-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; VSCALEFORTUNING2: [[MIDDLE_BLOCK]]: @@ -454,12 +442,6 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; PRED: [[VECTOR_PH]]: ; PRED-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() ; PRED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 8 -; PRED-NEXT: [[TMP5:%.*]] = sub i64 [[TMP2]], 1 -; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP5]] -; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP2]] -; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; PRED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8 ; PRED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; PRED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 8 ; PRED-NEXT: [[TMP10:%.*]] = sub i64 [[TMP0]], [[TMP9]] @@ -478,7 +460,7 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; PRED-NEXT: [[TMP20:%.*]] = udiv <vscale x 8 x i16> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]] ; PRED-NEXT: [[TMP21:%.*]] = or <vscale x 8 x i16> [[TMP20]], [[VEC_PHI]] ; PRED-NEXT: [[TMP16]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x i16> [[TMP21]], <vscale x 8 x i16> [[VEC_PHI]] -; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP7]] +; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP2]] ; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP12]]) ; PRED-NEXT: [[TMP17:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; PRED-NEXT: [[TMP18:%.*]] = extractelement <vscale x 8 x i1> [[TMP17]], i32 0 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll index eb3d724..8d8d427 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll @@ -23,15 +23,13 @@ define void @test_no_scalarization(ptr %a, ptr noalias %b, i32 %idx, i32 %n) #0 ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], [[TMP5]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 2 ; CHECK-NEXT: 
[[IND_END:%.*]] = add i32 [[IDX]], [[N_VEC]] ; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32() ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[IDX]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[DOTSPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 2 x i32> [[TMP8]], splat (i32 1) ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i32> [[DOTSPLAT]], [[TMP10]] -; CHECK-NEXT: [[TMP13:%.*]] = mul i32 1, [[TMP7]] +; CHECK-NEXT: [[TMP13:%.*]] = mul i32 1, [[TMP5]] ; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP13]], i64 0 ; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[DOTSPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -44,7 +42,7 @@ define void @test_no_scalarization(ptr %a, ptr noalias %b, i32 %idx, i32 %n) #0 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[TMP16]], align 8 ; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[B:%.*]], i32 [[OFFSET_IDX]] ; CHECK-NEXT: store <vscale x 2 x double> [[WIDE_LOAD]], ptr [[TMP18]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP7]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i32> [[VEC_IND]], [[DOTSPLAT2]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll index a7ec749..c78f68f 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll @@ -14,8 +14,6 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -28,7 +26,7 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture ; CHECK-NEXT: [[TMP12:%.*]] = select fast <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> zeroinitializer ; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP12]]) ; CHECK-NEXT: [[TMP14]] = fadd fast float [[VEC_PHI]], [[TMP13]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: @@ -98,8 +96,6 @@ define float @cond_cmp_sel(ptr noalias %a, ptr noalias %cond, i64 %N) { ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; 
CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -113,7 +109,7 @@ define float @cond_cmp_sel(ptr noalias %a, ptr noalias %cond, i64 %N) { ; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP12]]) ; CHECK-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast olt float [[TMP13]], [[VEC_PHI]] ; CHECK-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP13]], float [[VEC_PHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll index 0cad053..b0ee9fc 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll @@ -42,37 +42,35 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 ; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-UNORDERED: vector.body: ; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] -; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4 -; CHECK-UNORDERED-NEXT: [[TMP7]] = fadd <vscale x 8 x float> [[WIDE_LOAD]], [[VEC_PHI]] -; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-UNORDERED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] +; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4 +; CHECK-UNORDERED-NEXT: [[TMP5]] = fadd <vscale x 8 x float> [[WIDE_LOAD]], [[VEC_PHI]] +; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-UNORDERED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-UNORDERED: 
middle.block: -; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[TMP7]]) +; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[TMP5]]) ; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; CHECK-UNORDERED: scalar.ph: ; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ] ; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-UNORDERED: for.body: ; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-UNORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-UNORDERED-NEXT: [[ADD]] = fadd float [[TMP10]], [[SUM_07]] +; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-UNORDERED-NEXT: [[ADD]] = fadd float [[TMP8]], [[SUM_07]] ; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] ; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-UNORDERED: for.end: -; CHECK-UNORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] +; CHECK-UNORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; CHECK-UNORDERED-NEXT: ret float [[ADD_LCSSA]] ; ; CHECK-ORDERED-LABEL: define float @fadd_strict @@ -87,36 +85,34 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 ; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-ORDERED: vector.body: ; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4 -; CHECK-ORDERED-NEXT: [[TMP7]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[WIDE_LOAD]]) -; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-ORDERED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] +; 
CHECK-ORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4 +; CHECK-ORDERED-NEXT: [[TMP5]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[WIDE_LOAD]]) +; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-ORDERED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-ORDERED: middle.block: ; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; CHECK-ORDERED: scalar.ph: ; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ] ; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-ORDERED: for.body: ; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-NEXT: [[ADD]] = fadd float [[TMP9]], [[SUM_07]] +; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-ORDERED-NEXT: [[ADD]] = fadd float [[TMP7]], [[SUM_07]] ; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] ; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-ORDERED: for.end: -; CHECK-ORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] +; CHECK-ORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] ; CHECK-ORDERED-NEXT: ret float [[ADD_LCSSA]] ; ; CHECK-ORDERED-TF-LABEL: define float @fadd_strict @@ -126,32 +122,26 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-TF: vector.ph: ; CHECK-ORDERED-TF-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 -; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]] -; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 -; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8 -; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] -; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] -; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 +; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = mul 
nuw i64 [[TMP2]], 8 +; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = sub i64 [[N]], [[TMP3]] +; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[N]], [[TMP3]] +; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0 ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[N]]) ; CHECK-ORDERED-TF-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-ORDERED-TF: vector.body: ; CHECK-ORDERED-TF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP10]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison) -; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[WIDE_MASKED_LOAD]], <vscale x 8 x float> splat (float -0.000000e+00) -; CHECK-ORDERED-TF-NEXT: [[TMP12]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP11]]) -; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] -; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]]) -; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) -; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = extractelement <vscale x 8 x i1> [[TMP13]], i32 0 -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP7]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison) +; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[WIDE_MASKED_LOAD]], <vscale x 8 x float> splat (float -0.000000e+00) +; CHECK-ORDERED-TF-NEXT: [[TMP9]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP8]]) +; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] +; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP6]]) +; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) +; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = extractelement <vscale x 8 x i1> [[TMP10]], i32 0 +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] ; CHECK-ORDERED-TF: scalar.ph: @@ -162,13 +152,13 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], 
[ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP15]], [[SUM_07]] +; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP12]], [[SUM_07]] ; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] ; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ] +; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] ; CHECK-ORDERED-TF-NEXT: ret float [[ADD_LCSSA]] ; @@ -222,58 +212,56 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 ; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32 ; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-UNORDERED: vector.body: ; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] -; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ] -; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ] -; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] -; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8 -; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]] -; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16 -; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]] -; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24 -; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]] -; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4 -; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4 -; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4 -; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 
8 x float>, ptr [[TMP15]], align 4 -; CHECK-UNORDERED-NEXT: [[TMP16]] = fadd <vscale x 8 x float> [[WIDE_LOAD]], [[VEC_PHI]] -; CHECK-UNORDERED-NEXT: [[TMP17]] = fadd <vscale x 8 x float> [[WIDE_LOAD4]], [[VEC_PHI1]] -; CHECK-UNORDERED-NEXT: [[TMP18]] = fadd <vscale x 8 x float> [[WIDE_LOAD5]], [[VEC_PHI2]] -; CHECK-UNORDERED-NEXT: [[TMP19]] = fadd <vscale x 8 x float> [[WIDE_LOAD6]], [[VEC_PHI3]] -; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-UNORDERED-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] +; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] +; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] +; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ] +; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8 +; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]] +; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16 +; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]] +; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24 +; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]] +; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4 +; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4 +; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4 +; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4 +; CHECK-UNORDERED-NEXT: [[TMP14]] = fadd <vscale x 8 x float> [[WIDE_LOAD]], [[VEC_PHI]] +; CHECK-UNORDERED-NEXT: [[TMP15]] = fadd <vscale x 8 x float> [[WIDE_LOAD4]], [[VEC_PHI1]] +; CHECK-UNORDERED-NEXT: [[TMP16]] = fadd <vscale x 8 x float> [[WIDE_LOAD5]], [[VEC_PHI2]] +; CHECK-UNORDERED-NEXT: [[TMP17]] = fadd <vscale x 8 x float> [[WIDE_LOAD6]], [[VEC_PHI3]] +; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-UNORDERED-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-UNORDERED: middle.block: -; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd <vscale x 8 x float> [[TMP17]], [[TMP16]] -; CHECK-UNORDERED-NEXT: [[BIN_RDX7:%.*]] = fadd <vscale x 8 x float> [[TMP18]], [[BIN_RDX]] -; CHECK-UNORDERED-NEXT: [[BIN_RDX8:%.*]] = fadd <vscale x 8 x float> [[TMP19]], [[BIN_RDX7]] -; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = call float 
@llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX8]]) +; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd <vscale x 8 x float> [[TMP15]], [[TMP14]] +; CHECK-UNORDERED-NEXT: [[BIN_RDX7:%.*]] = fadd <vscale x 8 x float> [[TMP16]], [[BIN_RDX]] +; CHECK-UNORDERED-NEXT: [[BIN_RDX8:%.*]] = fadd <vscale x 8 x float> [[TMP17]], [[BIN_RDX7]] +; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX8]]) ; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; CHECK-UNORDERED: scalar.ph: ; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ] ; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-UNORDERED: for.body: ; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-UNORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-UNORDERED-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-UNORDERED-NEXT: [[ADD]] = fadd float [[TMP22]], [[SUM_07]] +; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-UNORDERED-NEXT: [[ADD]] = fadd float [[TMP20]], [[SUM_07]] ; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] ; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-UNORDERED: for.end: -; CHECK-UNORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ] +; CHECK-UNORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] ; CHECK-UNORDERED-NEXT: ret float [[ADD_LCSSA]] ; ; CHECK-ORDERED-LABEL: define float @fadd_strict_unroll @@ -288,51 +276,49 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 ; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32 ; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-ORDERED: vector.body: ; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8 -; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]] -; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16 -; 
CHECK-ORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]] -; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24 -; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]] -; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4 -; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4 -; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4 -; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4 -; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[WIDE_LOAD]]) -; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP16]], <vscale x 8 x float> [[WIDE_LOAD1]]) -; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP17]], <vscale x 8 x float> [[WIDE_LOAD2]]) -; CHECK-ORDERED-NEXT: [[TMP19]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP18]], <vscale x 8 x float> [[WIDE_LOAD3]]) -; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-ORDERED-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-ORDERED-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ] +; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8 +; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]] +; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16 +; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]] +; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24 +; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]] +; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4 +; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4 +; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4 +; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4 +; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[WIDE_LOAD]]) +; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP14]], <vscale x 8 x float> [[WIDE_LOAD1]]) +; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP15]], <vscale x 8 x float> [[WIDE_LOAD2]]) +; CHECK-ORDERED-NEXT: [[TMP17]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP16]], <vscale x 8 x float> [[WIDE_LOAD3]]) +; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-ORDERED-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-ORDERED: middle.block: ; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; CHECK-ORDERED: scalar.ph: ; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP17]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ] ; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-ORDERED: for.body: ; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-NEXT: [[ADD]] = fadd float [[TMP21]], [[SUM_07]] +; CHECK-ORDERED-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-ORDERED-NEXT: [[ADD]] = fadd float [[TMP19]], [[SUM_07]] ; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] ; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-ORDERED: for.end: -; CHECK-ORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] +; CHECK-ORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] ; CHECK-ORDERED-NEXT: ret float [[ADD_LCSSA]] ; ; CHECK-ORDERED-TF-LABEL: define float @fadd_strict_unroll @@ -342,26 +328,20 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-TF: vector.ph: ; CHECK-ORDERED-TF-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 32 -; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]] -; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 32 -; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 32 -; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] -; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] -; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 -; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 8 -; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP11]] -; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 16 -; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP13]] -; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 24 -; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, 
[[TMP15]] +; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 +; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = sub i64 [[N]], [[TMP3]] +; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[N]], [[TMP3]] +; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0 +; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8 +; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP8]] +; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 16 +; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP10]] +; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24 +; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP12]] ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[N]]) ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY3:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT]], i64 [[N]]) ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY4:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT1]], i64 [[N]]) @@ -373,46 +353,46 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK6:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY3]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT12:%.*]], [[VECTOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK7:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY4]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT13:%.*]], [[VECTOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK8:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY5]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT14:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP30:%.*]], [[VECTOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 8 +; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP15]] ; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8 -; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]] +; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 16 +; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP18]] ; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16 -; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]] -; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24 -; CHECK-ORDERED-TF-NEXT: 
[[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]] -; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison) -; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison) -; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison) -; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP25]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison) -; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[WIDE_MASKED_LOAD]], <vscale x 8 x float> splat (float -0.000000e+00) -; CHECK-ORDERED-TF-NEXT: [[TMP27:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP26]]) -; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], <vscale x 8 x float> splat (float -0.000000e+00) -; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP27]], <vscale x 8 x float> [[TMP28]]) -; CHECK-ORDERED-TF-NEXT: [[TMP30:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], <vscale x 8 x float> splat (float -0.000000e+00) -; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP29]], <vscale x 8 x float> [[TMP30]]) -; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], <vscale x 8 x float> splat (float -0.000000e+00) -; CHECK-ORDERED-TF-NEXT: [[TMP33]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP31]], <vscale x 8 x float> [[TMP32]]) -; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 24 +; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP21]] +; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP13]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison) +; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison) +; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison) +; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison) +; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[WIDE_MASKED_LOAD]], <vscale x 8 x float> splat (float -0.000000e+00) +; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP23]]) +; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = 
select <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], <vscale x 8 x float> splat (float -0.000000e+00) +; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP24]], <vscale x 8 x float> [[TMP25]]) +; CHECK-ORDERED-TF-NEXT: [[TMP27:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], <vscale x 8 x float> splat (float -0.000000e+00) +; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP26]], <vscale x 8 x float> [[TMP27]]) +; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], <vscale x 8 x float> splat (float -0.000000e+00) +; CHECK-ORDERED-TF-NEXT: [[TMP30]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP28]], <vscale x 8 x float> [[TMP29]]) +; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] +; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = mul nuw i64 [[TMP31]], 8 +; CHECK-ORDERED-TF-NEXT: [[TMP33:%.*]] = add i64 [[INDEX]], [[TMP32]] ; CHECK-ORDERED-TF-NEXT: [[TMP34:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP34]], 8 +; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP34]], 16 ; CHECK-ORDERED-TF-NEXT: [[TMP36:%.*]] = add i64 [[INDEX]], [[TMP35]] ; CHECK-ORDERED-TF-NEXT: [[TMP37:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], 16 +; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], 24 ; CHECK-ORDERED-TF-NEXT: [[TMP39:%.*]] = add i64 [[INDEX]], [[TMP38]] -; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = mul nuw i64 [[TMP40]], 24 -; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = add i64 [[INDEX]], [[TMP41]] -; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]]) -; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP36]], i64 [[TMP9]]) -; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP39]], i64 [[TMP9]]) -; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP42]], i64 [[TMP9]]) -; CHECK-ORDERED-TF-NEXT: [[TMP43:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) -; CHECK-ORDERED-TF-NEXT: [[TMP44:%.*]] = extractelement <vscale x 8 x i1> [[TMP43]], i32 0 -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP6]]) +; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP33]], i64 [[TMP6]]) +; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP36]], i64 [[TMP6]]) +; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP39]], i64 [[TMP6]]) +; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) +; 
CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = extractelement <vscale x 8 x i1> [[TMP40]], i32 0 +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] ; CHECK-ORDERED-TF: scalar.ph: @@ -423,13 +403,13 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP45]], [[SUM_07]] +; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP42]], [[SUM_07]] ; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] ; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP33]], [[MIDDLE_BLOCK]] ] +; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP30]], [[MIDDLE_BLOCK]] ] ; CHECK-ORDERED-TF-NEXT: ret float [[ADD_LCSSA]] ; @@ -500,54 +480,52 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali ; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]] ; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] -; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 -; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 2 -; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[A2]], i32 0 -; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[A1]], i32 0 +; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 2 +; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[A2]], i32 0 +; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[A1]], i32 0 ; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-UNORDERED: vector.body: ; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP10]], [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] -; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x float> [ [[TMP11]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] +; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP8]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] +; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; CHECK-UNORDERED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2 -; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[B]], i64 
[[OFFSET_IDX]] -; CHECK-UNORDERED-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4 +; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OFFSET_IDX]] +; CHECK-UNORDERED-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4 ; CHECK-UNORDERED-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.vector.deinterleave2.nxv8f32(<vscale x 8 x float> [[WIDE_VEC]]) -; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0 -; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1 -; CHECK-UNORDERED-NEXT: [[TMP15]] = fadd <vscale x 4 x float> [[TMP13]], [[VEC_PHI1]] -; CHECK-UNORDERED-NEXT: [[TMP16]] = fadd <vscale x 4 x float> [[TMP14]], [[VEC_PHI]] -; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] -; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-UNORDERED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0 +; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1 +; CHECK-UNORDERED-NEXT: [[TMP13]] = fadd <vscale x 4 x float> [[TMP11]], [[VEC_PHI1]] +; CHECK-UNORDERED-NEXT: [[TMP14]] = fadd <vscale x 4 x float> [[TMP12]], [[VEC_PHI]] +; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-UNORDERED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-UNORDERED: middle.block: -; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP16]]) -; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP15]]) +; CHECK-UNORDERED-NEXT: [[TMP16:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP14]]) +; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP13]]) ; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; CHECK-UNORDERED: scalar.ph: -; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP18]], [[MIDDLE_BLOCK]] ], [ [[A2]], [[ENTRY:%.*]] ] -; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX2:%.*]] = phi float [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ [[A1]], [[ENTRY]] ] -; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP16]], [[MIDDLE_BLOCK]] ], [ [[A2]], [[ENTRY:%.*]] ] +; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX2:%.*]] = phi float [ [[TMP17]], [[MIDDLE_BLOCK]] ], [ [[A1]], [[ENTRY]] ] +; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] ; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-UNORDERED: for.body: ; CHECK-UNORDERED-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ] ; CHECK-UNORDERED-NEXT: [[ADD_PHI2:%.*]] = phi float [ 
[[BC_MERGE_RDX2]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ] ; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-UNORDERED-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4 -; CHECK-UNORDERED-NEXT: [[ADD1]] = fadd float [[TMP20]], [[ADD_PHI2]] +; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4 +; CHECK-UNORDERED-NEXT: [[ADD1]] = fadd float [[TMP18]], [[ADD_PHI2]] ; CHECK-UNORDERED-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1 ; CHECK-UNORDERED-NEXT: [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]] -; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4 -; CHECK-UNORDERED-NEXT: [[ADD2]] = fadd float [[TMP21]], [[ADD_PHI1]] +; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4 +; CHECK-UNORDERED-NEXT: [[ADD2]] = fadd float [[TMP19]], [[ADD_PHI1]] ; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 2 ; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] ; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-UNORDERED: for.end: -; CHECK-UNORDERED-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] -; CHECK-UNORDERED-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] +; CHECK-UNORDERED-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] +; CHECK-UNORDERED-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ] ; CHECK-UNORDERED-NEXT: store float [[ADD1_LCSSA]], ptr [[A]], align 4 ; CHECK-UNORDERED-NEXT: store float [[ADD2_LCSSA]], ptr [[ARRAYIDXA]], align 4 ; CHECK-UNORDERED-NEXT: ret void @@ -570,50 +548,48 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali ; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]] ; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] -; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 -; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 2 +; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 2 ; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-ORDERED: vector.body: ; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ [[A2]], [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-NEXT: [[VEC_PHI1:%.*]] = phi float [ [[A1]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] +; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ [[A2]], [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] +; CHECK-ORDERED-NEXT: [[VEC_PHI1:%.*]] = phi float [ [[A1]], [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; CHECK-ORDERED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2 -; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OFFSET_IDX]] -; CHECK-ORDERED-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4 +; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OFFSET_IDX]] +; CHECK-ORDERED-NEXT: 
[[WIDE_VEC:%.*]] = load <vscale x 8 x float>, ptr [[TMP8]], align 4 ; CHECK-ORDERED-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.vector.deinterleave2.nxv8f32(<vscale x 8 x float> [[WIDE_VEC]]) -; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0 -; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1 -; CHECK-ORDERED-NEXT: [[TMP13]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP12]]) -; CHECK-ORDERED-NEXT: [[TMP14]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI1]], <vscale x 4 x float> [[TMP11]]) -; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] -; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-ORDERED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0 +; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1 +; CHECK-ORDERED-NEXT: [[TMP11]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP10]]) +; CHECK-ORDERED-NEXT: [[TMP12]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI1]], <vscale x 4 x float> [[TMP9]]) +; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-ORDERED-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-ORDERED: middle.block: ; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; CHECK-ORDERED: scalar.ph: -; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP13]], [[MIDDLE_BLOCK]] ], [ [[A2]], [[ENTRY:%.*]] ] -; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX2:%.*]] = phi float [ [[TMP14]], [[MIDDLE_BLOCK]] ], [ [[A1]], [[ENTRY]] ] -; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ [[A2]], [[ENTRY:%.*]] ] +; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX2:%.*]] = phi float [ [[TMP12]], [[MIDDLE_BLOCK]] ], [ [[A1]], [[ENTRY]] ] +; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] ; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-ORDERED: for.body: ; CHECK-ORDERED-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4 -; CHECK-ORDERED-NEXT: [[ADD1]] = fadd float [[TMP16]], [[ADD_PHI2]] +; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4 +; CHECK-ORDERED-NEXT: [[ADD1]] = fadd float [[TMP14]], [[ADD_PHI2]] ; CHECK-ORDERED-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1 ; CHECK-ORDERED-NEXT: 
[[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]] -; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4 -; CHECK-ORDERED-NEXT: [[ADD2]] = fadd float [[TMP17]], [[ADD_PHI1]] +; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4 +; CHECK-ORDERED-NEXT: [[ADD2]] = fadd float [[TMP15]], [[ADD_PHI1]] ; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 2 ; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] ; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-ORDERED: for.end: -; CHECK-ORDERED-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ] +; CHECK-ORDERED-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ] +; CHECK-ORDERED-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] ; CHECK-ORDERED-NEXT: store float [[ADD1_LCSSA]], ptr [[A]], align 4 ; CHECK-ORDERED-NEXT: store float [[ADD2_LCSSA]], ptr [[ARRAYIDXA]], align 4 ; CHECK-ORDERED-NEXT: ret void @@ -631,40 +607,34 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali ; CHECK-ORDERED-TF: vector.ph: ; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 -; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = sub i64 [[TMP4]], 1 -; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP2]], [[TMP5]] -; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] -; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 -; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 -; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = sub i64 [[TMP2]], [[TMP9]] -; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = icmp ugt i64 [[TMP2]], [[TMP9]] -; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = select i1 [[TMP11]], i64 [[TMP10]], i64 0 +; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 +; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[TMP2]], [[TMP6]] +; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[TMP2]], [[TMP6]] +; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[TMP2]]) ; CHECK-ORDERED-TF-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-ORDERED-TF: vector.body: ; CHECK-ORDERED-TF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ [[A2]], [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[VEC_PHI1:%.*]] = phi float [ [[A1]], [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ [[A2]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] +; 
CHECK-ORDERED-TF-NEXT: [[VEC_PHI1:%.*]] = phi float [ [[A1]], [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2 -; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OFFSET_IDX]] +; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OFFSET_IDX]] ; CHECK-ORDERED-TF-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP13]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x float> poison) +; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP10]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x float> poison) ; CHECK-ORDERED-TF-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.vector.deinterleave2.nxv8f32(<vscale x 8 x float> [[WIDE_MASKED_VEC]]) -; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0 -; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1 -; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP15]], <vscale x 4 x float> splat (float -0.000000e+00) -; CHECK-ORDERED-TF-NEXT: [[TMP17]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP16]]) -; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP14]], <vscale x 4 x float> splat (float -0.000000e+00) -; CHECK-ORDERED-TF-NEXT: [[TMP19]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI1]], <vscale x 4 x float> [[TMP18]]) -; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP7]] -; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP12]]) -; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) -; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = extractelement <vscale x 4 x i1> [[TMP20]], i32 0 -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0 +; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1 +; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP12]], <vscale x 4 x float> splat (float -0.000000e+00) +; CHECK-ORDERED-TF-NEXT: [[TMP14]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP13]]) +; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP11]], <vscale x 4 x float> splat (float -0.000000e+00) +; CHECK-ORDERED-TF-NEXT: [[TMP16]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI1]], <vscale x 4 x float> [[TMP15]]) +; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> 
@llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) +; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) +; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = extractelement <vscale x 4 x i1> [[TMP17]], i32 0 +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] ; CHECK-ORDERED-TF: scalar.ph: @@ -677,18 +647,18 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali ; CHECK-ORDERED-TF-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[A1]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD1]] = fadd float [[TMP22]], [[ADD_PHI2]] +; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4 +; CHECK-ORDERED-TF-NEXT: [[ADD1]] = fadd float [[TMP19]], [[ADD_PHI2]] ; CHECK-ORDERED-TF-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1 ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]] -; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD2]] = fadd float [[TMP23]], [[ADD_PHI1]] +; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4 +; CHECK-ORDERED-TF-NEXT: [[ADD2]] = fadd float [[TMP20]], [[ADD_PHI1]] ; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 2 ; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] ; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] +; CHECK-ORDERED-TF-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ] +; CHECK-ORDERED-TF-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] ; CHECK-ORDERED-TF-NEXT: store float [[ADD1_LCSSA]], ptr [[A]], align 4 ; CHECK-ORDERED-TF-NEXT: store float [[ADD2_LCSSA]], ptr [[ARRAYIDXA]], align 4 ; CHECK-ORDERED-TF-NEXT: ret void @@ -770,43 +740,41 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu ; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP4]] ; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-UNORDERED: vector.body: ; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] -; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, 
ptr [[A]], i64 [[INDEX]] -; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4 -; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] -; CHECK-UNORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP8]], align 4 -; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = fadd <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]] -; CHECK-UNORDERED-NEXT: [[TMP10]] = fadd <vscale x 4 x float> [[VEC_PHI]], [[TMP9]] -; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] -; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-UNORDERED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ] +; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP5]], align 4 +; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] +; CHECK-UNORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4 +; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = fadd <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; CHECK-UNORDERED-NEXT: [[TMP8]] = fadd <vscale x 4 x float> [[VEC_PHI]], [[TMP7]] +; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-UNORDERED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-UNORDERED: middle.block: -; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP10]]) +; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP8]]) ; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[SCALAR_PH]] ; CHECK-UNORDERED: scalar.ph: ; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ] -; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP12]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] ; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-UNORDERED: for.body: ; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; CHECK-UNORDERED-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] ; CHECK-UNORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 ; CHECK-UNORDERED-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 -; CHECK-UNORDERED-NEXT: [[ADD:%.*]] = fadd float [[TMP13]], [[TMP14]] +; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = 
load float, ptr [[ARRAYIDX4]], align 4 +; CHECK-UNORDERED-NEXT: [[ADD:%.*]] = fadd float [[TMP11]], [[TMP12]] ; CHECK-UNORDERED-NEXT: [[RDX]] = fadd float [[RES_014]], [[ADD]] ; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] ; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-UNORDERED: for.end.loopexit: -; CHECK-UNORDERED-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ] +; CHECK-UNORDERED-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ] ; CHECK-UNORDERED-NEXT: br label [[FOR_END]] ; CHECK-UNORDERED: for.end: ; CHECK-UNORDERED-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[RDX_LCSSA]], [[FOR_END_LOOPEXIT]] ] @@ -829,42 +797,40 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu ; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP4]] ; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-ORDERED: vector.body: ; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] -; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4 -; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] -; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP8]], align 4 -; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = fadd <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]] -; CHECK-ORDERED-NEXT: [[TMP10]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP9]]) -; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] -; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-ORDERED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ] +; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP5]], align 4 +; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] +; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4 +; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = fadd <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; CHECK-ORDERED-NEXT: [[TMP8]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP7]]) +; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-ORDERED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-ORDERED: middle.block: ; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq 
i64 [[N]], [[N_VEC]]
; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK-ORDERED: scalar.ph:
; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP8]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED: for.body:
; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-ORDERED-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-ORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-ORDERED-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
-; CHECK-ORDERED-NEXT: [[ADD:%.*]] = fadd float [[TMP12]], [[TMP13]]
+; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
+; CHECK-ORDERED-NEXT: [[ADD:%.*]] = fadd float [[TMP10]], [[TMP11]]
; CHECK-ORDERED-NEXT: [[RDX]] = fadd float [[RES_014]], [[ADD]]
; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-ORDERED: for.end.loopexit:
-; CHECK-ORDERED-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_END]]
; CHECK-ORDERED: for.end:
; CHECK-ORDERED-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[RDX_LCSSA]], [[FOR_END_LOOPEXIT]] ]
@@ -882,35 +848,29 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu
; CHECK-ORDERED-TF: vector.ph:
; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = sub i64 [[TMP2]], 1
-; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP3]]
-; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP2]]
-; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = sub i64 [[N]], [[TMP7]]
-; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[N]], [[TMP7]]
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i64 [[TMP8]], i64 0
+; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = sub i64 [[N]], [[TMP4]]
+; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[N]], [[TMP4]]
+; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i64 [[TMP5]], i64 0
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]])
; CHECK-ORDERED-TF-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED-TF: vector.body:
; CHECK-ORDERED-TF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = fadd <vscale x 4 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD1]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP13]], <vscale x 4 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP15]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP14]])
-; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]]
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP10]])
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[TMP16]], i32 0
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP9]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = fadd <vscale x 4 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD1]]
+; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP10]], <vscale x 4 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP12]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP11]])
+; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP2]]
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP7]])
+; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[TMP13]], i32 0
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END_LOOPEXIT:%.*]]
; CHECK-ORDERED-TF: scalar.ph:
@@ -921,16 +881,16 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu
; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-ORDERED-TF-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ADD:%.*]] = fadd float [[TMP18]], [[TMP19]]
+; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
+; CHECK-ORDERED-TF-NEXT: [[ADD:%.*]] = fadd float [[TMP15]], [[TMP16]]
; CHECK-ORDERED-TF-NEXT: [[RDX]] = fadd float [[RES_014]], [[ADD]]
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-ORDERED-TF: for.end.loopexit:
-; CHECK-ORDERED-TF-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END]]
; CHECK-ORDERED-TF: for.end:
; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[RDX_LCSSA]], [[FOR_END_LOOPEXIT]] ]
@@ -1002,49 +962,47 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 1.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_LOAD]], zeroinitializer
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> poison)
-; CHECK-UNORDERED-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> splat (float 3.000000e+00)
-; CHECK-UNORDERED-NEXT: [[TMP9]] = fadd <vscale x 4 x float> [[VEC_PHI]], [[PREDPHI]]
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 1.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP4]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_LOAD]], zeroinitializer
+; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP6]], i32 4, <vscale x 4 x i1> [[TMP5]], <vscale x 4 x float> poison)
+; CHECK-UNORDERED-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP5]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> splat (float 3.000000e+00)
+; CHECK-UNORDERED-NEXT: [[TMP7]] = fadd <vscale x 4 x float> [[VEC_PHI]], [[PREDPHI]]
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP9]])
+; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP7]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ 1.000000e+00, [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 1.000000e+00, [[ENTRY]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-UNORDERED-NEXT: [[RES:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-UNORDERED-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP12]], 0.000000e+00
+; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-UNORDERED-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP10]], 0.000000e+00
; CHECK-UNORDERED-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK-UNORDERED: if.then:
; CHECK-UNORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-UNORDERED-NEXT: br label [[FOR_INC]]
; CHECK-UNORDERED: for.inc:
-; CHECK-UNORDERED-NEXT: [[PHI:%.*]] = phi float [ [[TMP13]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[PHI:%.*]] = phi float [ [[TMP11]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[FADD]] = fadd float [[RES]], [[PHI]]
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-UNORDERED: for.end:
-; CHECK-UNORDERED-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: ret float [[RDX]]
;
; CHECK-ORDERED-LABEL: define float @fadd_conditional
@@ -1059,48 +1017,46 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-ORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED: vector.body:
; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
-; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_LOAD]], zeroinitializer
-; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> poison)
-; CHECK-ORDERED-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> splat (float 3.000000e+00)
-; CHECK-ORDERED-NEXT: [[TMP9]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[PREDPHI]])
-; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-ORDERED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP4]], align 4
+; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_LOAD]], zeroinitializer
+; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP6]], i32 4, <vscale x 4 x i1> [[TMP5]], <vscale x 4 x float> poison)
+; CHECK-ORDERED-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP5]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> splat (float 3.000000e+00)
+; CHECK-ORDERED-NEXT: [[TMP7]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[PREDPHI]])
+; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-ORDERED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-ORDERED: middle.block:
; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-ORDERED: scalar.ph:
; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 1.000000e+00, [[ENTRY]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 1.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED: for.body:
; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-ORDERED-NEXT: [[RES:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ]
; CHECK-ORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP11]], 0.000000e+00
+; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP9]], 0.000000e+00
; CHECK-ORDERED-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK-ORDERED: if.then:
; CHECK-ORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-ORDERED-NEXT: br label [[FOR_INC]]
; CHECK-ORDERED: for.inc:
-; CHECK-ORDERED-NEXT: [[PHI:%.*]] = phi float [ [[TMP12]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[PHI:%.*]] = phi float [ [[TMP10]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[FADD]] = fadd float [[RES]], [[PHI]]
; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-ORDERED: for.end:
-; CHECK-ORDERED-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-NEXT: ret float [[RDX]]
;
; CHECK-ORDERED-TF-LABEL: define float @fadd_conditional
@@ -1110,37 +1066,31 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-ORDERED-TF: vector.ph:
; CHECK-ORDERED-TF-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
+; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = sub i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]])
; CHECK-ORDERED-TF-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED-TF: vector.body:
; CHECK-ORDERED-TF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_MASKED_LOAD]], zeroinitializer
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP11]], <vscale x 4 x i1> zeroinitializer
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP13]], i32 4, <vscale x 4 x i1> [[TMP12]], <vscale x 4 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP12]], <vscale x 4 x float> [[WIDE_MASKED_LOAD1]], <vscale x 4 x float> splat (float 3.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[PREDPHI]], <vscale x 4 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP15]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP14]])
-; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[TMP16]], i32 0
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP7]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_MASKED_LOAD]], zeroinitializer
+; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP8]], <vscale x 4 x i1> zeroinitializer
+; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> [[WIDE_MASKED_LOAD1]], <vscale x 4 x float> splat (float 3.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[PREDPHI]], <vscale x 4 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP12]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP11]])
+; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[TMP13]], i32 0
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
; CHECK-ORDERED-TF: scalar.ph:
@@ -1151,21 +1101,21 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 1.000000e+00, [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-TF-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP18]], 0.000000e+00
+; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-TF-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP15]], 0.000000e+00
; CHECK-ORDERED-TF-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK-ORDERED-TF: if.then:
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-ORDERED-TF-NEXT: br label [[FOR_INC]]
; CHECK-ORDERED-TF: for.inc:
-; CHECK-ORDERED-TF-NEXT: [[PHI:%.*]] = phi float [ [[TMP19]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[PHI:%.*]] = phi float [ [[TMP16]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[FADD]] = fadd float [[RES]], [[PHI]]
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: ret float [[RDX]]
;
@@ -1234,43 +1184,41 @@ define float @fadd_multiple(ptr noalias nocapture %a, ptr noalias nocapture %b,
; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float -0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = fadd <vscale x 8 x float> [[VEC_PHI]], [[WIDE_LOAD]]
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP8]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP9]] = fadd <vscale x 8 x float> [[TMP7]], [[WIDE_LOAD1]]
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float -0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = fadd <vscale x 8 x float> [[VEC_PHI]], [[WIDE_LOAD]]
+; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP7]] = fadd <vscale x 8 x float> [[TMP5]], [[WIDE_LOAD1]]
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[TMP9]])
+; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[TMP7]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ -0.000000e+00, [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ -0.000000e+00, [[ENTRY]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[SUM:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD3:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-UNORDERED-NEXT: [[ADD:%.*]] = fadd float [[SUM]], [[TMP12]]
+; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-UNORDERED-NEXT: [[ADD:%.*]] = fadd float [[SUM]], [[TMP10]]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-UNORDERED-NEXT: [[ADD3]] = fadd float [[ADD]], [[TMP13]]
+; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-UNORDERED-NEXT: [[ADD3]] = fadd float [[ADD]], [[TMP11]]
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-UNORDERED: for.end:
-; CHECK-UNORDERED-NEXT: [[RDX:%.*]] = phi float [ [[ADD3]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[RDX:%.*]] = phi float [ [[ADD3]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: ret float [[RDX]]
;
; CHECK-ORDERED-LABEL: define float @fadd_multiple
@@ -1371,74 +1319,72 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP28:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP29:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
-; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16
-; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]]
-; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24
-; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
-; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-UNORDERED-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-UNORDERED-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-UNORDERED-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP16]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x float>, ptr [[TMP19]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 8 x float>, ptr [[TMP22]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 8 x float>, ptr [[TMP25]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP26]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD]], <vscale x 8 x float> [[WIDE_LOAD7]], <vscale x 8 x float> [[VEC_PHI]])
-; CHECK-UNORDERED-NEXT: [[TMP27]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD4]], <vscale x 8 x float> [[WIDE_LOAD8]], <vscale x 8 x float> [[VEC_PHI1]])
-; CHECK-UNORDERED-NEXT: [[TMP28]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD5]], <vscale x 8 x float> [[WIDE_LOAD9]], <vscale x 8 x float> [[VEC_PHI2]])
-; CHECK-UNORDERED-NEXT: [[TMP29]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD6]], <vscale x 8 x float> [[WIDE_LOAD10]], <vscale x 8 x float> [[VEC_PHI3]])
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-UNORDERED-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]]
+; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
+; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]]
+; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
+; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP16]]
+; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 16
+; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP19]]
+; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP22:%.*]] = mul nuw i64 [[TMP21]], 24
+; CHECK-UNORDERED-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP22]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP14]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x float>, ptr [[TMP17]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 8 x float>, ptr [[TMP20]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 8 x float>, ptr [[TMP23]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP24]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD]], <vscale x 8 x float> [[WIDE_LOAD7]], <vscale x 8 x float> [[VEC_PHI]])
+; CHECK-UNORDERED-NEXT: [[TMP25]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD4]], <vscale x 8 x float> [[WIDE_LOAD8]], <vscale x 8 x float> [[VEC_PHI1]])
+; CHECK-UNORDERED-NEXT: [[TMP26]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD5]], <vscale x 8 x float> [[WIDE_LOAD9]], <vscale x 8 x float> [[VEC_PHI2]])
+; CHECK-UNORDERED-NEXT: [[TMP27]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD6]], <vscale x 8 x float> [[WIDE_LOAD10]], <vscale x 8 x float> [[VEC_PHI3]])
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-UNORDERED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd <vscale x 8 x float> [[TMP27]], [[TMP26]]
-; CHECK-UNORDERED-NEXT: [[BIN_RDX11:%.*]] = fadd <vscale x 8 x float> [[TMP28]], [[BIN_RDX]]
-; CHECK-UNORDERED-NEXT: [[BIN_RDX12:%.*]] = fadd <vscale x 8 x float> [[TMP29]], [[BIN_RDX11]]
-; CHECK-UNORDERED-NEXT: [[TMP31:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX12]])
+; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd <vscale x 8 x float> [[TMP25]], [[TMP24]]
+; CHECK-UNORDERED-NEXT: [[BIN_RDX11:%.*]] = fadd <vscale x 8 x float> [[TMP26]], [[BIN_RDX]]
+; CHECK-UNORDERED-NEXT: [[BIN_RDX12:%.*]] = fadd <vscale x 8 x float> [[TMP27]], [[BIN_RDX11]]
+; CHECK-UNORDERED-NEXT: [[TMP29:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX12]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP31]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP29]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP32:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP30:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-UNORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-UNORDERED-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP32]], float [[TMP33]], float [[SUM_07]])
+; CHECK-UNORDERED-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-UNORDERED-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP30]], float [[TMP31]], float [[SUM_07]])
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK-UNORDERED: for.end:
-; CHECK-UNORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP29]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: ret float [[MULADD_LCSSA]]
;
; CHECK-ORDERED-LABEL: define float @fmuladd_strict
@@ -1453,71 +1399,69 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED: vector.body:
; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
-; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16
-; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]]
-; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24
-; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4
-; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-ORDERED-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
-; CHECK-ORDERED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-ORDERED-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-ORDERED-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-ORDERED-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP16]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP19]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP22]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP25]], align 4
-; CHECK-ORDERED-NEXT: [[TMP26:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]]
-; CHECK-ORDERED-NEXT: [[TMP27:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]]
-; CHECK-ORDERED-NEXT: [[TMP28:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
-; CHECK-ORDERED-NEXT: [[TMP29:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
-; CHECK-ORDERED-NEXT: [[TMP30:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP26]])
-; CHECK-ORDERED-NEXT: [[TMP31:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP30]], <vscale x 8 x float> [[TMP27]])
-; CHECK-ORDERED-NEXT: [[TMP32:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP31]], <vscale x 8 x float> [[TMP28]])
-; CHECK-ORDERED-NEXT: [[TMP33]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP32]], <vscale x 8 x float> [[TMP29]])
-; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-ORDERED-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-ORDERED-NEXT: br i1 [[TMP34]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP31:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]]
+; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
+; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]]
+; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4
+; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
+; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP16]]
+; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 16
+; CHECK-ORDERED-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP19]]
+; CHECK-ORDERED-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP22:%.*]] = mul nuw i64 [[TMP21]], 24
+; CHECK-ORDERED-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP22]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP14]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP17]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP20]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP23]], align 4
+; CHECK-ORDERED-NEXT: [[TMP24:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]]
+; CHECK-ORDERED-NEXT: [[TMP25:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]]
+; CHECK-ORDERED-NEXT: [[TMP26:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
+; CHECK-ORDERED-NEXT: [[TMP27:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
+; CHECK-ORDERED-NEXT: [[TMP28:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP24]])
+; CHECK-ORDERED-NEXT: [[TMP29:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP28]], <vscale x 8 x float> [[TMP25]])
+; CHECK-ORDERED-NEXT: [[TMP30:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP29]], <vscale x 8 x float> [[TMP26]])
+; CHECK-ORDERED-NEXT: [[TMP31]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP30]], <vscale x 8 x float> [[TMP27]])
+; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-ORDERED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-ORDERED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-ORDERED: middle.block:
; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-ORDERED: scalar.ph:
; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP33]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP31]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED: for.body:
; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP35:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP35]], float [[TMP36]], float [[SUM_07]])
+; CHECK-ORDERED-NEXT: [[TMP34:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP33]], float [[TMP34]], float [[SUM_07]])
; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-ORDERED: for.end:
-; CHECK-ORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP33]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-NEXT: ret float [[MULADD_LCSSA]]
;
; CHECK-ORDERED-TF-LABEL: define float @fmuladd_strict
@@ -1527,26 +1471,20 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF: vector.ph:
; CHECK-ORDERED-TF-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 8
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 16
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP13]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 24
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP15]]
+; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
+; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = sub i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0
+; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP8]]
+; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 16
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP10]]
+; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP12]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[N]])
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY3:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT]], i64 [[N]])
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY4:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT1]], i64 [[N]])
@@ -1558,64 +1496,64 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK6:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY3]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT16:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK7:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY4]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT17:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK8:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY5]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT18:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP47:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP44:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP15]]
; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
+; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP18]]
; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP25]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 24
+; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP21]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP13]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = mul nuw i64 [[TMP24]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP25]]
; CHECK-ORDERED-TF-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP28]]
+; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP28]]
; CHECK-ORDERED-TF-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 16
-; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP31]]
-; CHECK-ORDERED-TF-NEXT: [[TMP33:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP34:%.*]] = mul nuw i64 [[TMP33]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP34]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP26]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD13:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP29]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD14:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP32]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP35]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP36:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD12]]
-; CHECK-ORDERED-TF-NEXT: [[TMP37:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD13]]
-; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], [[WIDE_MASKED_LOAD14]]
-; CHECK-ORDERED-TF-NEXT: [[TMP39:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], [[WIDE_MASKED_LOAD15]]
-; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[TMP36]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP40]])
-; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[TMP37]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP43:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP41]], <vscale x 8 x float> [[TMP42]])
-; CHECK-ORDERED-TF-NEXT: [[TMP44:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[TMP38]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP43]], <vscale x 8 x float> [[TMP44]])
-; CHECK-ORDERED-TF-NEXT: [[TMP46:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[TMP39]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP47]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP45]], <vscale x 8 x float> [[TMP46]])
-; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 24
+; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP31]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP23]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD13:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP26]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD14:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP29]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP32]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP33:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD12]]
+; CHECK-ORDERED-TF-NEXT: [[TMP34:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD13]]
+; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], [[WIDE_MASKED_LOAD14]]
+; CHECK-ORDERED-TF-NEXT: [[TMP36:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], [[WIDE_MASKED_LOAD15]]
+; CHECK-ORDERED-TF-NEXT: [[TMP37:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[TMP33]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP37]])
+; CHECK-ORDERED-TF-NEXT: [[TMP39:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[TMP34]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP38]], <vscale x 8 x float> [[TMP39]])
+; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[TMP35]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP40]], <vscale x 8 x float> [[TMP41]])
+; CHECK-ORDERED-TF-NEXT: [[TMP43:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[TMP36]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP44]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP42]], <vscale x 8 x float> [[TMP43]])
+; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
+; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP45]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP47:%.*]] = add i64 [[INDEX]], [[TMP46]]
; CHECK-ORDERED-TF-NEXT: [[TMP48:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP49:%.*]] = mul nuw i64 [[TMP48]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP49:%.*]] = mul nuw i64 [[TMP48]], 16
; CHECK-ORDERED-TF-NEXT: [[TMP50:%.*]] = add i64 [[INDEX]], [[TMP49]]
; CHECK-ORDERED-TF-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP52:%.*]] = mul nuw i64 [[TMP51]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP52:%.*]] = mul nuw i64 [[TMP51]], 24
; CHECK-ORDERED-TF-NEXT: [[TMP53:%.*]] = add i64 [[INDEX]], [[TMP52]]
-; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = mul nuw i64 [[TMP54]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = add i64 [[INDEX]], [[TMP55]]
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP50]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP56]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-ORDERED-TF-NEXT: [[TMP58:%.*]] = extractelement <vscale x 8 x i1> [[TMP57]], i32 0
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP58]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP47]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP50]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = extractelement <vscale x 8 x i1> [[TMP54]], i32 0
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
; CHECK-ORDERED-TF: scalar.ph:
@@ -1626,15 +1564,15 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP60:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP59]], float [[TMP60]], float [[SUM_07]])
+; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP57]], float [[SUM_07]])
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP47]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP44]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: ret float [[MULADD_LCSSA]]
;
@@ -1693,74 +1631,72 @@ define float
; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP28:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP29:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
-; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16
-; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]]
-; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24
-; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
-; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-UNORDERED-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-UNORDERED-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-UNORDERED-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP16]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x float>, ptr [[TMP19]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 8 x float>, ptr [[TMP22]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 8 x float>, ptr [[TMP25]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP26]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD]], <vscale x 8 x float> [[WIDE_LOAD7]], <vscale x 8 x float> [[VEC_PHI]])
-; CHECK-UNORDERED-NEXT: [[TMP27]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD4]], <vscale x 8 x float> [[WIDE_LOAD8]], <vscale x 8 x float> [[VEC_PHI1]])
-; CHECK-UNORDERED-NEXT: [[TMP28]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD5]], <vscale x 8 x float> [[WIDE_LOAD9]], <vscale x 8 x float> [[VEC_PHI2]])
-; CHECK-UNORDERED-NEXT: [[TMP29]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD6]], <vscale x 8 x float> [[WIDE_LOAD10]], <vscale x 8 x float> [[VEC_PHI3]])
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-UNORDERED-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]]
+; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
+; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]]
+; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
+; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP16]]
+; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 16
+; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP19]]
+; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP22:%.*]] = mul nuw i64 [[TMP21]], 24
+; CHECK-UNORDERED-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP22]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP14]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x float>, ptr [[TMP17]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 8 x float>, ptr [[TMP20]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 8 x float>, ptr [[TMP23]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP24]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD]], <vscale x 8 x float> [[WIDE_LOAD7]], <vscale x 8 x float> [[VEC_PHI]])
+; CHECK-UNORDERED-NEXT: [[TMP25]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD4]], <vscale x 8 x float> [[WIDE_LOAD8]], <vscale x 8 x float> [[VEC_PHI1]])
+; CHECK-UNORDERED-NEXT: [[TMP26]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD5]], <vscale x 8 x float> [[WIDE_LOAD9]], <vscale x 8 x float> [[VEC_PHI2]])
+; CHECK-UNORDERED-NEXT: [[TMP27]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD6]], <vscale x 8 x float> [[WIDE_LOAD10]], <vscale x 8 x float> [[VEC_PHI3]])
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-UNORDERED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd nnan <vscale x 8 x float> [[TMP27]], [[TMP26]]
-; CHECK-UNORDERED-NEXT: [[BIN_RDX11:%.*]] = fadd nnan <vscale x 8 x float> [[TMP28]], [[BIN_RDX]]
-; CHECK-UNORDERED-NEXT: [[BIN_RDX12:%.*]] = fadd nnan <vscale x 8 x float> [[TMP29]], [[BIN_RDX11]]
-; CHECK-UNORDERED-NEXT: [[TMP31:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX12]])
+; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd nnan <vscale x 8 x float> [[TMP25]], [[TMP24]]
+; CHECK-UNORDERED-NEXT: [[BIN_RDX11:%.*]] = fadd nnan <vscale x 8 x float> [[TMP26]], [[BIN_RDX]]
+; CHECK-UNORDERED-NEXT: [[BIN_RDX12:%.*]] = fadd nnan <vscale x 8 x float> [[TMP27]], [[BIN_RDX11]]
+; CHECK-UNORDERED-NEXT: [[TMP29:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX12]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP31]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP29]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP32:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP30:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-UNORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-UNORDERED-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP32]], float [[TMP33]], float [[SUM_07]])
+; CHECK-UNORDERED-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-UNORDERED-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP30]], float [[TMP31]], float [[SUM_07]])
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-UNORDERED: for.end:
-; CHECK-UNORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP29]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: ret float [[MULADD_LCSSA]]
;
; CHECK-ORDERED-LABEL: define float @fmuladd_strict_fmf
@@ -1775,71 +1711,69 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED: vector.body:
; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
-; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16
-; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]]
-; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24
-; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4
-; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-ORDERED-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
-; CHECK-ORDERED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-ORDERED-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-ORDERED-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-ORDERED-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP16]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP19]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP22]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP25]], align 4
-; CHECK-ORDERED-NEXT: [[TMP26:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]]
-; CHECK-ORDERED-NEXT: [[TMP27:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]]
-; CHECK-ORDERED-NEXT: [[TMP28:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
-; CHECK-ORDERED-NEXT: [[TMP29:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
-; CHECK-ORDERED-NEXT: [[TMP30:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP26]])
-; CHECK-ORDERED-NEXT: [[TMP31:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP30]], <vscale x 8 x float> [[TMP27]])
-; CHECK-ORDERED-NEXT: [[TMP32:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP31]], <vscale x 8 x float> [[TMP28]])
-; CHECK-ORDERED-NEXT: [[TMP33]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP32]], <vscale x 8 x float> [[TMP29]])
-; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-ORDERED-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-ORDERED-NEXT: br i1 [[TMP34]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP31:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]]
+; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
+; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]]
+; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4
+; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
+; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP16]]
+; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 16
+; CHECK-ORDERED-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP19]]
+; CHECK-ORDERED-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP22:%.*]] = mul nuw i64 [[TMP21]], 24
+; CHECK-ORDERED-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP22]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP14]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP17]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP20]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP23]], align 4
+; CHECK-ORDERED-NEXT: [[TMP24:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]]
+; CHECK-ORDERED-NEXT: [[TMP25:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]]
+; CHECK-ORDERED-NEXT: [[TMP26:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
+; CHECK-ORDERED-NEXT: [[TMP27:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
+; CHECK-ORDERED-NEXT: [[TMP28:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP24]])
+; CHECK-ORDERED-NEXT: [[TMP29:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP28]], <vscale x 8 x float> [[TMP25]])
+; CHECK-ORDERED-NEXT: [[TMP30:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP29]], <vscale x 8 x float> [[TMP26]])
+; CHECK-ORDERED-NEXT: [[TMP31]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP30]], <vscale x 8 x float> [[TMP27]])
+; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-ORDERED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-ORDERED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-ORDERED: middle.block:
; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-ORDERED: scalar.ph:
; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP33]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP31]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED: for.body:
; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP35:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP35]], float [[TMP36]], float [[SUM_07]])
+; CHECK-ORDERED-NEXT: [[TMP34:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP33]], float [[TMP34]], float [[SUM_07]])
; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-ORDERED: for.end:
-; CHECK-ORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP33]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-NEXT: ret float [[MULADD_LCSSA]]
;
; CHECK-ORDERED-TF-LABEL: define float @fmuladd_strict_fmf
@@ -1849,26 +1783,20 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF: vector.ph:
; CHECK-ORDERED-TF-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 8
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 16
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP13]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 24
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP15]]
+; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
+; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = sub i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0
+; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP8]]
+; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 16
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP10]]
+; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP12]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[N]])
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY3:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT]], i64 [[N]])
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY4:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT1]], i64 [[N]])
@@ -1880,64 +1808,64 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK6:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY3]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT16:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK7:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY4]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT17:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK8:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY5]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT18:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP47:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP44:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP15]]
; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
+; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP18]]
; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP25]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 24
+; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP21]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP13]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = mul nuw i64 [[TMP24]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP25]]
; CHECK-ORDERED-TF-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP28]]
+; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP28]]
; CHECK-ORDERED-TF-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 16
-; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP31]]
-; CHECK-ORDERED-TF-NEXT: [[TMP33:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP34:%.*]] = mul nuw i64 [[TMP33]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP34]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP26]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD13:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP29]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD14:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP32]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP35]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP36:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD12]]
-; CHECK-ORDERED-TF-NEXT: [[TMP37:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD13]]
-; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], [[WIDE_MASKED_LOAD14]]
-; CHECK-ORDERED-TF-NEXT: [[TMP39:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], [[WIDE_MASKED_LOAD15]]
-; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[TMP36]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP40]])
-; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[TMP37]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP43:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP41]], <vscale x 8 x float> [[TMP42]])
-; CHECK-ORDERED-TF-NEXT: [[TMP44:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[TMP38]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP43]], <vscale x 8 x float> [[TMP44]])
-; CHECK-ORDERED-TF-NEXT: [[TMP46:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[TMP39]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP47]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP45]], <vscale x 8 x float> [[TMP46]])
-; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 24
+; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP31]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP23]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD13:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP26]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD14:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP29]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP32]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP33:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD12]]
+; CHECK-ORDERED-TF-NEXT: [[TMP34:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD13]]
+; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], [[WIDE_MASKED_LOAD14]]
+; CHECK-ORDERED-TF-NEXT: [[TMP36:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], [[WIDE_MASKED_LOAD15]]
+; CHECK-ORDERED-TF-NEXT: [[TMP37:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[TMP33]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP37]])
+; CHECK-ORDERED-TF-NEXT: [[TMP39:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[TMP34]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP38]], <vscale x 8 x float> [[TMP39]])
+; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[TMP35]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP40]], <vscale x 8 x float> [[TMP41]])
+; CHECK-ORDERED-TF-NEXT: [[TMP43:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[TMP36]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP44]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP42]], <vscale x 8 x float> [[TMP43]])
+; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
+; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP45]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP47:%.*]] = add i64 [[INDEX]], [[TMP46]]
; CHECK-ORDERED-TF-NEXT: [[TMP48:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP49:%.*]] = mul nuw i64 [[TMP48]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP49:%.*]] = mul nuw i64 [[TMP48]], 16
; CHECK-ORDERED-TF-NEXT: [[TMP50:%.*]] = add i64 [[INDEX]], [[TMP49]]
; CHECK-ORDERED-TF-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP52:%.*]] = mul nuw i64 [[TMP51]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP52:%.*]] = mul nuw i64 [[TMP51]], 24
; CHECK-ORDERED-TF-NEXT: [[TMP53:%.*]] = add i64 [[INDEX]], [[TMP52]]
-; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = mul nuw i64 [[TMP54]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = add i64 [[INDEX]], [[TMP55]]
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP50]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP56]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-ORDERED-TF-NEXT: [[TMP58:%.*]] = extractelement <vscale x 8 x i1> [[TMP57]], i32 0
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP58]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP47]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP50]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = extractelement <vscale x 8 x i1> [[TMP54]], i32 0
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
; CHECK-ORDERED-TF: scalar.ph:
@@ -1948,15 +1876,15 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP60:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP59]], float [[TMP60]], float [[SUM_07]])
+; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP57]], float [[SUM_07]])
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP47]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP44]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: ret float [[MULADD_LCSSA]]
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
index eaf85694..4a12aef 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
@@ -22,8 +22,6 @@ define i64 @same_exit_block_pre_inc_use1() #1 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 64, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NEXT: [[TMP6:%.*]] = add i64 3, [[N_VEC]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -34,7 +32,7 @@ define i64 @same_exit_block_pre_inc_use1() #1 {
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1
; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
-; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], [[TMP3]]
; CHECK-NEXT: [[TMP17:%.*]] = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> [[TMP16]])
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT3]], [[N_VEC]]
; CHECK-NEXT: [[TMP19:%.*]] = or i1 [[TMP17]], [[TMP18]]
@@ -264,8 +262,6 @@ define i64 @loop_contains_safe_div() #1 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP10]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP3]]
; CHECK-NEXT: [[INDEX1:%.*]] = sub i64 64, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -275,7 +271,7 @@ define i64 @loop_contains_safe_div() #1 {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP1]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = udiv <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 20000)
; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <vscale x 4 x i32> [[TMP13]], splat (i32 1)
-; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX2]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX2]], [[TMP3]]
; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP15]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[INDEX1]]
; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/single-early-exit-interleave.ll b/llvm/test/Transforms/LoopVectorize/AArch64/single-early-exit-interleave.ll
index 070f658..cc83819 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/single-early-exit-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/single-early-exit-interleave.ll
@@ -22,8 +22,6 @@ define i64 @same_exit_block_pre_inc_use1() #0 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 64
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 510, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 510, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 64
; CHECK-NEXT: [[INDEX_NEXT:%.*]] = add i64 3, [[N_VEC]]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: vector.body:
@@ -61,7 +59,7 @@ define i64 @same_exit_block_pre_inc_use1() #0 {
; CHECK-NEXT: [[TMP30:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
; CHECK-NEXT: [[TMP31:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
; CHECK-NEXT: [[TMP59:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD4]], [[WIDE_LOAD8]]
-; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], [[TMP3]]
; CHECK-NEXT: [[TMP34:%.*]] = or <vscale x 16 x i1> [[TMP32]], [[TMP30]]
; CHECK-NEXT: [[TMP37:%.*]] = or <vscale x 16 x i1> [[TMP34]], [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = or <vscale x 16 x i1> [[TMP37]], [[TMP59]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
index d32b898..cc88946 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
@@ -23,8 +23,6 @@ define void @cost_store_i8(ptr %dst) #0 {
; DEFAULT-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 101, [[TMP5]]
; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 101, [[N_MOD_VF]]
-; DEFAULT-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 32
; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
; DEFAULT: vector.body:
; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -34,7 +32,7 @@ define void @cost_store_i8(ptr %dst) #0 {
; DEFAULT-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP9]], i64 [[TMP23]]
; DEFAULT-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP9]], align 1
; DEFAULT-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP24]], align 1
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; DEFAULT-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; DEFAULT-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; DEFAULT: middle.block:
@@ -52,14 +50,12 @@ define void @cost_store_i8(ptr %dst) #0 {
; DEFAULT-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 8
; DEFAULT-NEXT: [[N_MOD_VF2:%.*]] = urem i64 101, [[TMP15]]
; DEFAULT-NEXT: [[N_VEC3:%.*]] = sub i64 101, [[N_MOD_VF2]]
-; DEFAULT-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 8
; DEFAULT-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; DEFAULT: vec.epilog.vector.body:
; DEFAULT-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
; DEFAULT-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX5]]
; DEFAULT-NEXT: store <vscale x 8 x i8> zeroinitializer, ptr [[TMP19]], align 1
-; DEFAULT-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX5]], [[TMP17]]
+; DEFAULT-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX5]], [[TMP15]]
; DEFAULT-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC3]]
; DEFAULT-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; DEFAULT: vec.epilog.middle.block:
@@ -85,12 +81,6 @@ define void @cost_store_i8(ptr %dst) #0 {
; PRED: vector.ph:
; PRED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; PRED-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 101, [[TMP4]]
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; PRED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; PRED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; PRED-NEXT: [[TMP9:%.*]] = sub i64 101, [[TMP8]]
@@ -103,7 +93,7 @@ define void @cost_store_i8(ptr %dst) #0 {
; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; PRED-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]]
; PRED-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> zeroinitializer, ptr [[TMP13]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP11]])
; PRED-NEXT: [[TMP15:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; PRED-NEXT: [[TMP16:%.*]] = extractelement <vscale x 16 x i1> [[TMP15]], i32 0
@@ -227,12 +217,6 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; PRED: vector.ph:
; PRED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP10]], 2
-; PRED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 1000, [[TMP2]]
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; PRED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i16> poison, i16 [[X]], i64 0
; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1000)
@@ -248,7 +232,7 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; PRED-NEXT: [[TMP9:%.*]] = and <vscale x 2 x i8> [[TMP8]], [[TMP11]]
; PRED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]]
; PRED-NEXT: call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP9]], ptr [[TMP5]], i32 1, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]), !alias.scope [[META7:![0-9]+]], !noalias [[META4]]
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1000)
; PRED-NEXT: [[TMP12:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; PRED-NEXT: [[TMP13:%.*]] = extractelement <vscale x 2 x i1> [[TMP12]], i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll
index c721493..bdbbfdf 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll
@@ -82,8 +82,8 @@ define void @struct_return_replicate(ptr noalias %in, ptr noalias writeonly %out
; CHECK: [[ENTRY:.*:]]
; CHECK: [[VECTOR_PH:.*:]]
; CHECK: [[VECTOR_BODY:.*:]]
-; CHECK: [[TMP4:%.*]] = tail call { half, half } @foo(half [[TMP3:%.*]]) #[[ATTR3:[0-9]+]]
-; CHECK: [[TMP6:%.*]] = tail call { half, half } @foo(half [[TMP5:%.*]]) #[[ATTR3]]
+; CHECK: [[TMP2:%.*]] = tail call { half, half } @foo(half [[TMP1:%.*]]) #[[ATTR3:[0-9]+]]
+; CHECK: [[TMP4:%.*]] = tail call { half, half } @foo(half [[TMP3:%.*]]) #[[ATTR3]]
; CHECK: [[MIDDLE_BLOCK:.*:]]
; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
@@ -149,10 +149,9 @@ exit:
define void @struct_return_scalable(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) #2 {
; CHECK-LABEL: define void @struct_return_scalable(
; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK: [[ENTRY:.*:]]
-; CHECK: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK: [[VECTOR_PH:.*:]]
; CHECK: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK: [[VECTOR_PH1:.*:]]
; CHECK: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK: [[VECTOR_BODY:.*:]]
; CHECK: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
index 67f5083..495f9c0 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
@@ -10,11 +10,10 @@ define void @cond_inv_load_i32i32i16(ptr noalias nocapture %a, ptr noalias nocap
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -69,11 +68,10 @@ define void @cond_inv_load_f64f64f64(ptr noalias nocapture %a, ptr noalias nocap
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -126,11 +124,10 @@ define void @invariant_load_cond(ptr noalias nocapture %a, ptr nocapture readonl
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[B:%.*]], i64 168
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[TMP5]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
index e555785..d89c525 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
@@ -19,8 +19,6 @@ define i64 @int_reduction_and(ptr noalias nocapture %a, i64 %N) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -36,7 +34,7 @@ define i64 @int_reduction_and(ptr noalias nocapture %a, i64 %N) {
; CHECK-NEXT: [[TMP17]] = and i64 [[VEC_PHI]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> [[WIDE_LOAD3]])
; CHECK-NEXT: [[TMP19]] = and i64 [[VEC_PHI2]], [[TMP18]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP21]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll
index af9c39e..5e22532 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll
@@ -19,8 +19,6 @@ define i64 @int_reduction_add(ptr %a, i64 %N) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -34,7 +32,7 @@ define i64 @int_reduction_add(ptr %a, i64 %N) {
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i64>, ptr [[TMP15]], align 8
; CHECK-NEXT: [[TMP16]] = add <vscale x 2 x i64> [[WIDE_LOAD]], [[VEC_PHI]]
; CHECK-NEXT: [[TMP17]] = add <vscale x 2 x i64> [[WIDE_LOAD3]], [[VEC_PHI2]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll
index fca29cd..52117e3f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll
@@ -19,8 +19,6 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -33,7 +31,7 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP17]], align 4
; CHECK-NEXT: [[TMP18:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[WIDE_LOAD]])
; CHECK-NEXT: [[TMP19]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[TMP18]], <vscale x 4 x float> [[WIDE_LOAD2]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
index 0f407cd..a521bfa 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
@@ -35,8
+35,6 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP5]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 32 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -46,7 +44,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i64 [[TMP18]] ; CHECK-NEXT: store <vscale x 16 x i8> splat (i8 1), ptr [[TMP14]], align 1 ; CHECK-NEXT: store <vscale x 16 x i8> splat (i8 1), ptr [[TMP19]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: @@ -64,14 +62,12 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 8 ; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 1024, [[TMP24]] ; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 1024, [[N_MOD_VF2]] -; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP26:%.*]] = mul nuw i64 [[TMP25]], 8 ; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; CHECK: vec.epilog.vector.body: ; CHECK-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX5]] ; CHECK-NEXT: store <vscale x 8 x i8> splat (i8 1), ptr [[TMP28]], align 1 -; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX5]], [[TMP26]] +; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX5]], [[TMP24]] ; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC3]] ; CHECK-NEXT: br i1 [[TMP30]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: vec.epilog.middle.block: @@ -95,8 +91,6 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 ; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32 ; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-VF8: vector.body: ; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -106,7 +100,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-VF8-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i64 [[TMP16]] ; CHECK-VF8-NEXT: store <vscale x 16 x i8> splat (i8 1), ptr [[TMP12]], align 1 ; CHECK-VF8-NEXT: store <vscale x 16 x i8> splat (i8 1), ptr [[TMP17]], align 1 -; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-VF8: middle.block: @@ -159,8 +153,6 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) { ; 
CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -170,7 +162,7 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) { ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i64 [[TMP8]] ; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP6]], align 1 ; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP9]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: @@ -194,8 +186,6 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-VF8: vector.body: ; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -205,7 +195,7 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i64 [[TMP8]] ; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP6]], align 1 ; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP9]], align 1 -; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-VF8: middle.block: @@ -276,8 +266,6 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -287,7 +275,7 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i64 [[TMP16]] ; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP12]], align 1 ; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP17]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP7:![0-9]+]] ; CHECK: middle.block: @@ -331,8 +319,6 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-VF8: vector.body: ; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -342,7 +328,7 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i64 [[TMP16]] ; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP12]], align 1 ; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP17]], align 1 -; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-VF8: middle.block: @@ -406,8 +392,6 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 10000, [[TMP5]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 10000, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 32 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -417,7 +401,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP18]] ; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP14]], align 1 ; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP19]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: @@ -436,15 +420,13 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 8 ; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 10000, [[TMP24]] ; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 10000, [[N_MOD_VF2]] -; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP26:%.*]] = mul nuw i64 [[TMP25]], 8 ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC3]] ; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; CHECK: vec.epilog.vector.body: ; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT8:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX7]] ; CHECK-NEXT: store <vscale x 8 x i8> zeroinitializer, ptr [[TMP28]], align 1 -; CHECK-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX7]], [[TMP26]] +; CHECK-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX7]], [[TMP24]] ; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT8]], [[N_VEC3]] ; CHECK-NEXT: br i1 [[TMP30]], 
label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: vec.epilog.middle.block: @@ -469,8 +451,6 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 ; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 10000, [[TMP3]] ; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 10000, [[N_MOD_VF]] -; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32 ; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-VF8: vector.body: ; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -480,7 +460,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-VF8-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP12]], i64 [[TMP16]] ; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP12]], align 1 ; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP17]], align 1 -; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-VF8: middle.block: @@ -545,8 +525,6 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -569,7 +547,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]] ; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4 ; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: @@ -587,8 +565,6 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-NEXT: [[TMP27:%.*]] = mul nuw i64 [[TMP26]], 2 ; CHECK-NEXT: [[N_MOD_VF5:%.*]] = urem i64 [[N]], [[TMP27]] ; CHECK-NEXT: [[N_VEC6:%.*]] = sub i64 [[N]], [[N_MOD_VF5]] -; CHECK-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], 2 ; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; CHECK: vec.epilog.vector.body: ; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT10:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] @@ -598,7 +574,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 2 x float>, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = fmul <vscale x 2 x float> [[WIDE_LOAD8]], [[WIDE_LOAD9]] ; CHECK-NEXT: store <vscale x 2 x float> [[TMP34]], ptr 
[[TMP32]], align 4 -; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], [[TMP29]] +; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], [[TMP27]] ; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC6]] ; CHECK-NEXT: br i1 [[TMP35]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: vec.epilog.middle.block: @@ -621,8 +597,6 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 ; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-VF8: vector.body: ; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -645,7 +619,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[TMP19]] ; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP16]], ptr [[TMP11]], align 4 ; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP20]], align 4 -; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-VF8: middle.block: @@ -697,8 +671,6 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -721,7 +693,7 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]] ; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4 ; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: middle.block: @@ -739,8 +711,6 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-NEXT: [[TMP27:%.*]] = mul nuw i64 [[TMP26]], 2 ; CHECK-NEXT: [[N_MOD_VF5:%.*]] = urem i64 [[N]], [[TMP27]] ; CHECK-NEXT: [[N_VEC6:%.*]] = sub i64 [[N]], [[N_MOD_VF5]] -; CHECK-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], 2 ; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; CHECK: vec.epilog.vector.body: ; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT10:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] @@ -750,7 +720,7 @@ define void 
@trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 2 x float>, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = fmul <vscale x 2 x float> [[WIDE_LOAD8]], [[WIDE_LOAD9]] ; CHECK-NEXT: store <vscale x 2 x float> [[TMP34]], ptr [[TMP32]], align 4 -; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], [[TMP29]] +; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], [[TMP27]] ; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC6]] ; CHECK-NEXT: br i1 [[TMP35]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: vec.epilog.middle.block: @@ -773,8 +743,6 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 ; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-VF8: vector.body: ; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -797,7 +765,7 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[TMP19]] ; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP16]], ptr [[TMP11]], align 4 ; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP20]], align 4 -; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-VF8: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fneg.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fneg.ll index 24f93f0..863dae7 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fneg.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fneg.ll @@ -31,8 +31,6 @@ define void @fneg(ptr nocapture noundef writeonly %d, ptr nocapture noundef read ; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 16 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP7]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -50,7 +48,7 @@ define void @fneg(ptr nocapture noundef writeonly %d, ptr nocapture noundef read ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds half, ptr [[TMP18]], i64 [[TMP21]] ; CHECK-NEXT: store <vscale x 8 x half> [[TMP16]], ptr [[TMP18]], align 2 ; CHECK-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP22]], align 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: diff --git 
a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll index 663cf41..d336f5f 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll @@ -10,11 +10,10 @@ define void @gather_nxv4i32_ind64(ptr noalias nocapture readonly %a, ptr noalias ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -62,11 +61,10 @@ define void @scatter_nxv4i32_ind32(ptr noalias nocapture %a, ptr noalias nocaptu ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -113,11 +111,10 @@ define void @scatter_inv_nxv4i32(ptr noalias nocapture %inv, ptr noalias nocaptu ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -166,11 +163,10 @@ define void @gather_inv_nxv4i32(ptr noalias nocapture %a, ptr noalias nocapture ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw 
nsw i64 [[TMP3]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -228,14 +224,12 @@ define void @gather_nxv4i32_ind64_stride2(ptr noalias nocapture %a, ptr noalias ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_VEC]], 0 ; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP7]], i64 [[N_VEC]] ; CHECK-NEXT: [[N_VEC1:%.*]] = sub i64 [[N]], [[TMP6]] -; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 3 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[DOTIDX1:%.*]] = shl i64 [[INDEX]], 3 ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 [[DOTIDX1]] -; CHECK-NEXT: [[DOTIDX3:%.*]] = shl nuw nsw i64 [[TMP3]], 5 +; CHECK-NEXT: [[DOTIDX3:%.*]] = shl nuw nsw i64 [[TMP2]], 5 ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[B]], i64 [[DOTIDX3]] ; CHECK-NEXT: [[DOTIDX4:%.*]] = shl i64 [[INDEX]], 3 ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[DOTIDX4]] @@ -251,7 +245,7 @@ define void @gather_nxv4i32_ind64_stride2(ptr noalias nocapture %a, ptr noalias ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP12]], i64 [[DOTIDX]] ; CHECK-NEXT: store <vscale x 4 x float> [[WIDE_MASKED_GATHER]], ptr [[TMP12]], align 4 ; CHECK-NEXT: store <vscale x 4 x float> [[WIDE_MASKED_GATHER2]], ptr [[TMP14]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC1]] ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll index fefb5af..351da8b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll @@ -12,13 +12,11 @@ define void @induction_i7(ptr %dst) #0 { ; CHECK-LABEL: define void @induction_i7( ; CHECK-SAME: ptr [[DST:%.*]]) ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 64, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP40:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP40]], 2 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 64, [[N_MOD_VF]] ; CHECK-NEXT: [[IND_END:%.*]] = trunc i64 [[N_VEC]] to i7 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP40]], i64 0 ; CHECK-NEXT: [[DOTSPLAT_:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer @@ -76,13 +74,11 @@ define void 
@induction_i3_zext(ptr %dst) #0 { ; CHECK-LABEL: define void @induction_i3_zext( ; CHECK-SAME: ptr [[DST:%.*]]) ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 64, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP40:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP40]], 2 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 64, [[N_MOD_VF]] ; CHECK-NEXT: [[IND_END:%.*]] = trunc i64 [[N_VEC]] to i3 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP40]], i64 0 ; CHECK-NEXT: [[DOTSPLAT_:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll index 4f0637f..95836f8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll @@ -23,10 +23,8 @@ define void @cond_ind64(ptr noalias nocapture %a, ptr noalias nocapture readonly ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP5]], i64 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -37,7 +35,7 @@ define void @cond_ind64(ptr noalias nocapture %a, ptr noalias nocapture readonly ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> poison) ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]] ; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]], ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[TMP9]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll index 47ce05d..fd0bc0b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll @@ -437,8 +437,6 @@ define void @even_load_static_tc(ptr noalias nocapture readonly %A, ptr noalias ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 ; 
CHECK-NEXT: [[N_VEC:%.*]] = sub nuw nsw i64 512, [[TMP1]] -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2 ; CHECK-NEXT: [[IND_END:%.*]] = shl nuw nsw i64 [[N_VEC]], 1 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -452,7 +450,7 @@ define void @even_load_static_tc(ptr noalias nocapture readonly %A, ptr noalias ; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[INDEX]], 9223372036854775804 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[TMP7]] ; CHECK-NEXT: store <vscale x 4 x i32> [[TMP6]], ptr [[TMP8]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: @@ -508,8 +506,6 @@ define void @even_load_dynamic_tc(ptr noalias nocapture readonly %A, ptr noalias ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 ; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP6]], i64 [[N_MOD_VF]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[TMP9]] -; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP11:%.*]] = shl nuw nsw i64 [[TMP10]], 2 ; CHECK-NEXT: [[IND_END:%.*]] = shl i64 [[N_VEC]], 1 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -523,7 +519,7 @@ define void @even_load_dynamic_tc(ptr noalias nocapture readonly %A, ptr noalias ; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[INDEX]], 9223372036854775804 ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[TMP15]] ; CHECK-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP16]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: @@ -803,12 +799,10 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) #1 { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 ; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]] -; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z:%.*]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -822,7 +816,7 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) #1 { ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]]) ; CHECK-NEXT: 
[[TMP15:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP15]], <vscale x 4 x ptr> [[TMP13]], i32 4, <vscale x 4 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] @@ -875,10 +869,8 @@ define i32 @PR27626_1(ptr %p, i64 %n) #1 { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 ; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]] -; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2 ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -896,7 +888,7 @@ define i32 @PR27626_1(ptr %p, i64 %n) #1 { ; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC1]]) ; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 0 ; CHECK-NEXT: [[TMP17]] = add <vscale x 4 x i32> [[TMP16]], [[VEC_PHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] @@ -952,12 +944,10 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) #1 { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 ; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]] -; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z:%.*]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -971,7 +961,7 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) #1 { ; CHECK-NEXT: 
[[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]]) ; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP15]], <vscale x 4 x ptr> [[TMP14]], i32 4, <vscale x 4 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] @@ -1025,10 +1015,8 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) #1 { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 ; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]] -; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2 ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -1047,7 +1035,7 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) #1 { ; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC1]]) ; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 0 ; CHECK-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[TMP17]], [[VEC_PHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] @@ -1103,11 +1091,10 @@ define void @PR27626_4(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) #1 { ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i64 [[TMP2]], [[TMP4]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP5]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], [[DOTNEG]] ; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i64 [[TMP6]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP7]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], [[DOTNOT]] ; CHECK-NEXT: [[IND_END:%.*]] = shl nuw i64 [[N_VEC]], 1 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X:%.*]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer @@ -1180,11 +1167,10 @@ define void @PR27626_5(ptr %a, i32 %x, i32 
%y, i32 %z, i64 %n) #1 { ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i64 [[TMP2]], [[TMP4]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP5]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], [[DOTNEG]] ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP8]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], [[DOTNOT]] ; CHECK-NEXT: [[TMP11:%.*]] = shl nuw i64 [[N_VEC]], 1 ; CHECK-NEXT: [[IND_END:%.*]] = or disjoint i64 [[TMP11]], 3 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X:%.*]], i64 0 @@ -1273,11 +1259,10 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) #1 { ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP8]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP1]], [[DOTNEG]] ; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP10:%.*]] = shl nuw nsw i64 [[TMP9]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP10]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP1]], [[DOTNOT]] ; CHECK-NEXT: [[IND_END:%.*]] = shl i64 [[N_VEC]], 1 ; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; CHECK-NEXT: [[TMP15:%.*]] = shl <vscale x 4 x i64> [[TMP14]], splat (i64 1) @@ -1372,10 +1357,8 @@ define void @interleave_deinterleave_factor3(ptr writeonly noalias %dst, ptr rea ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub nuw nsw i64 1024, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP5]], i64 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -1402,7 +1385,7 @@ define void @interleave_deinterleave_factor3(ptr writeonly noalias %dst, ptr rea ; CHECK-NEXT: [[TMP17:%.*]] = shl <vscale x 4 x i32> [[TMP9]], [[TMP13]] ; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw i8, <vscale x 4 x ptr> [[TMP10]], i64 8 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP17]], <vscale x 4 x ptr> [[TMP25]], i32 4, <vscale x 4 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]] @@ -1466,8 +1449,6 @@ define void @interleave_deinterleave(ptr writeonly noalias %dst, ptr readonly %a ; CHECK-NEXT: [[TMP3:%.*]] = 
shl nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nuw nsw i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1492,7 +1473,7 @@ define void @interleave_deinterleave(ptr writeonly noalias %dst, ptr readonly %a
; CHECK-NEXT: [[TMP24:%.*]] = ashr <vscale x 4 x i32> [[TMP12]], [[TMP19]]
; CHECK-NEXT: [[INTERLEAVED_VEC13:%.*]] = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x i32> [[TMP22]], <vscale x 4 x i32> [[TMP23]], <vscale x 4 x i32> [[TMP24]])
; CHECK-NEXT: store <vscale x 16 x i32> [[INTERLEAVED_VEC13]], ptr [[TMP21]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
index f152dd3..40ad5bb 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
@@ -30,41 +30,39 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no
; SCALAR_TAIL_FOLDING-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i32 [[TMP0]], 64
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALAR_TAIL_FOLDING: vector.ph:
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP3]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4
+; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl nuw i32 [[TMP4]], 4
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALAR_TAIL_FOLDING: vector.body:
; SCALAR_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl i32 [[INDEX]], 1
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP9]]
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; SCALAR_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP10]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
+; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 1
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]]
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP7]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
; SCALAR_TAIL_FOLDING-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> [[WIDE_MASKED_VEC]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[TMP12]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = sext i32 [[TMP8]] to i64
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP14]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP13]]
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP16]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP15]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]])
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP11]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP10]]
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP13]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP12]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SCALAR_TAIL_FOLDING: middle.block:
; SCALAR_TAIL_FOLDING-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -80,42 +78,42 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPA:%.*]] = sub i32 1024, [[TMP3]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPB:%.*]] = icmp ult i32 [[TMP2]], 64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = select i1 [[TMPB]], i32 [[TMPA]], i32 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = sub i32 1024, [[TMP3]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 0
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 1024)
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_TAIL_FOLDING: vector.body:
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl i32 [[INDEX]], 1
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP9]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP10]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
+; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP8]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = shl i32 [[INDEX]], 1
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP11]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP12]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
; PREDICATED_TAIL_FOLDING-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> [[WIDE_MASKED_VEC]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[TMP12]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = sext i32 [[TMP8]] to i64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP14]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP13]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP16]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP15]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sext i32 [[TMP10]] to i64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP16]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP15]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP18]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]])
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], [[TMP1]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP4]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]])
; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP17]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP19:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
+; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP19]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_TAIL_FOLDING: scalar.ph:
@@ -176,34 +174,32 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no
; SCALAR_TAIL_FOLDING-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i32 [[TMP0]], 64
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALAR_TAIL_FOLDING: vector.ph:
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP3]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4
+; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl nuw i32 [[TMP4]], 4
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALAR_TAIL_FOLDING: vector.body:
; SCALAR_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP8]]
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP9]], i32 1, <vscale x 16 x i1> splat (i1 true))
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = zext nneg <vscale x 16 x i32> [[TMP11]] to <vscale x 16 x i64>
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP12]]
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP13]], i32 1, <vscale x 16 x i1> [[TMP10]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = zext nneg <vscale x 16 x i32> [[TMP4]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP5]]
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP6]], i32 1, <vscale x 16 x i1> splat (i1 true))
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = or disjoint <vscale x 16 x i32> [[TMP4]], splat (i32 1)
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP9]]
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP10]], i32 1, <vscale x 16 x i1> [[TMP7]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; SCALAR_TAIL_FOLDING: middle.block:
; SCALAR_TAIL_FOLDING-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -219,35 +215,35 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPA:%.*]] = sub i32 1024, [[TMP3]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPB:%.*]] = icmp ult i32 [[TMP2]], 64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = select i1 [[TMPB]], i32 [[TMPA]], i32 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = sub i32 1024, [[TMP3]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 0
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 1024)
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_TAIL_FOLDING: vector.body:
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = zext nneg <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP7]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP8]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = or disjoint <vscale x 16 x i32> [[TMP6]], splat (i32 1)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = zext nneg <vscale x 16 x i32> [[TMP11]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP12]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP13]], i32 1, <vscale x 16 x i1> [[TMP10]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP9]]
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP10]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP11]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = or disjoint <vscale x 16 x i32> [[TMP8]], splat (i32 1)
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = zext nneg <vscale x 16 x i32> [[TMP13]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP14]]
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP15]], i32 1, <vscale x 16 x i1> [[TMP12]])
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], [[TMP1]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP4]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]])
; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP14]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
+; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_TAIL_FOLDING: scalar.ph:
@@ -304,37 +300,35 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no
; SCALAR_TAIL_FOLDING-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i32 [[TMP0]], 64
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALAR_TAIL_FOLDING: vector.ph:
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP3]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4
+; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl nuw i32 [[TMP4]], 4
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV3]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALAR_TAIL_FOLDING: vector.body:
; SCALAR_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP9]]
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP10]], i32 1, <vscale x 16 x i1> [[TMP8]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = zext nneg <vscale x 16 x i32> [[TMP12]] to <vscale x 16 x i64>
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP13]]
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP14]], i32 1, <vscale x 16 x i1> [[TMP11]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = zext nneg <vscale x 16 x i32> [[TMP4]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP6]]
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP7]], i32 1, <vscale x 16 x i1> [[TMP5]])
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = or disjoint <vscale x 16 x i32> [[TMP4]], splat (i32 1)
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP10]]
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP11]], i32 1, <vscale x 16 x i1> [[TMP8]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT4]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; SCALAR_TAIL_FOLDING: middle.block:
; SCALAR_TAIL_FOLDING-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -351,39 +345,39 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPA:%.*]] = sub i32 1024, [[TMP3]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPB:%.*]] = icmp ult i32 [[TMP2]], 64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = select i1 [[TMPB]], i32 [[TMPA]], i32 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = sub i32 1024, [[TMP3]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 0
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 1024)
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV3]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_TAIL_FOLDING: vector.body:
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = zext nneg <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP9]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP10]], i32 1, <vscale x 16 x i1> [[TMP8]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP11]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = or disjoint <vscale x 16 x i32> [[TMP6]], splat (i32 1)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = zext nneg <vscale x 16 x i32> [[TMP13]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP14]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP15]], i32 1, <vscale x 16 x i1> [[TMP12]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP11]]
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP12]], i32 1, <vscale x 16 x i1> [[TMP10]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP13]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = or disjoint <vscale x 16 x i32> [[TMP8]], splat (i32 1)
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = zext nneg <vscale x 16 x i32> [[TMP15]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP16]]
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP17]], i32 1, <vscale x 16 x i1> [[TMP14]])
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], [[TMP1]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP4]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]])
; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT4]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP6:![0-9]+]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
+; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP18]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP6:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_TAIL_FOLDING: scalar.ph:
@@ -452,45 +446,43 @@ define dso_local void @masked_strided_factor4(ptr noalias nocapture readonly %p,
; SCALAR_TAIL_FOLDING-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i32 [[TMP0]], 64
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALAR_TAIL_FOLDING: vector.ph:
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP3]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4
+; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl nuw i32 [[TMP4]], 4
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALAR_TAIL_FOLDING: vector.body:
; SCALAR_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl i32 [[INDEX]], 2
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP9]]
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; SCALAR_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP10]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
+; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 2
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]]
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP7]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
; SCALAR_TAIL_FOLDING-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> [[WIDE_MASKED_VEC]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[TMP12]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP15]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP17]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP19:%.*]] = sext i32 [[TMP8]] to i64
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP19]]
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP16]], <vscale x 16 x i8> [[TMP17]], <vscale x 16 x i8> [[TMP18]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP20]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]])
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP12]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP11]])
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP14]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP16]]
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]], <vscale x 16 x i8> [[TMP15]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP21:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; SCALAR_TAIL_FOLDING: middle.block:
; SCALAR_TAIL_FOLDING-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -506,46 +498,46 @@ define dso_local void @masked_strided_factor4(ptr noalias nocapture readonly %p,
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPA:%.*]] = sub i32 1024, [[TMP3]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPB:%.*]] = icmp ult i32 [[TMP2]], 64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = select i1 [[TMPB]], i32 [[TMPA]], i32 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = sub i32 1024, [[TMP3]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 0
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 1024)
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_TAIL_FOLDING: vector.body:
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl i32 [[INDEX]], 2
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP9]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP10]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
+; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP8]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = shl i32 [[INDEX]], 2
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP11]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP12]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
; PREDICATED_TAIL_FOLDING-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> [[WIDE_MASKED_VEC]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[TMP12]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP15]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]])
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP17]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP19:%.*]] = sext i32 [[TMP8]] to i64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP19]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP16]], <vscale x 16 x i8> [[TMP17]], <vscale x 16 x i8> [[TMP18]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP20]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP16]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP20:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP19]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP21:%.*]] = sext i32 [[TMP10]] to i64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP21]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP17]], <vscale x 16 x i8> [[TMP18]], <vscale x 16 x i8> [[TMP19]], <vscale x 16 x i8> [[TMP20]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]])
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP22]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], [[TMP1]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP4]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]])
; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP21:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP21]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP8:![0-9]+]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP23:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
+; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP8:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_TAIL_FOLDING: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll
index f0675a4..c8bbbdc 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll
@@ -15,8 +15,6 @@ define void @inv_store_i16(ptr noalias %dst, ptr noalias readonly %src, i64 %N)
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -27,7 +25,7 @@ define void @inv_store_i16(ptr noalias %dst, ptr noalias readonly %src, i64 %N)
; CHECK-NEXT: [[TMP11:%.*]] = sub i32 [[TMP10]], 1
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 4 x i16> [[WIDE_LOAD]], i32 [[TMP11]]
; CHECK-NEXT: store i16 [[TMP12]], ptr [[DST:%.*]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -64,8 +62,6 @@ define void @cond_inv_store_i32(ptr noalias %dst, ptr noalias readonly %src, i64
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[DST:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -75,7 +71,7 @@ define void @cond_inv_store_i32(ptr noalias %dst, ptr noalias readonly %src, i64
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 4, <vscale x 4 x i1> [[TMP9]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
index 2b4aad1..76f33cf 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
@@ -19,8 +19,6 @@ define ptr @test(ptr %start.1, ptr %start.2, ptr %end) {
; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP7]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[N_VEC]], 8
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START_1:%.*]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 8
@@ -35,7 +33,7 @@ define ptr @test(ptr %start.1, ptr %start.2, ptr %end) {
; CHECK-NEXT: [[TMP35:%.*]] = getelementptr i64, ptr [[TMP30]], i64 [[TMP34]]
; CHECK-NEXT: store <vscale x 2 x i64> zeroinitializer, ptr [[TMP30]], align 8
; CHECK-NEXT: store <vscale x 2 x i64> zeroinitializer, ptr [[TMP35]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll
index 2b01018..49f9870 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll
@@ -9,12 +9,6 @@ define void @trip7_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 7, [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 7)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-multiexit.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-multiexit.ll
index 993c048..26a1649 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-multiexit.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-multiexit.ll
@@ -33,8 +33,6 @@ define void @multiple_exits_unique_exit_block(ptr %A, ptr %B, i32 %N) #0 {
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP10]]
-; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i32 [[TMP11]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -50,7 +48,7 @@ define void @multiple_exits_unique_exit_block(ptr %A, ptr %B, i32 %N) #0 {
; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[TMP25]], i64 [[TMP29]]
; CHECK-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD]], ptr [[TMP25]], align 4
; CHECK-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD3]], ptr [[TMP30]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP12]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -105,8 +103,6 @@ define i32 @multiple_exits_multiple_exit_blocks(ptr %A, ptr %B, i32 %N) #0 {
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP10]]
-; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i32 [[TMP11]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -122,7 +118,7 @@ define i32 @multiple_exits_multiple_exit_blocks(ptr %A, ptr %B, i32 %N) #0 {
; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[TMP25]], i64 [[TMP29]]
; CHECK-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD]], ptr [[TMP25]], align 4
; CHECK-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD3]], ptr [[TMP30]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP12]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll
index 893ebef..b8f44f6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll
@@ -46,8 +46,6 @@ define void @min_trip_count_due_to_runtime_checks_1(ptr %dst.1, ptr %dst.2, ptr
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], [[TMP16]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP49:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP50:%.*]] = mul nuw i64 [[TMP49]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -77,7 +75,7 @@ define void @min_trip_count_due_to_runtime_checks_1(ptr %dst.1, ptr %dst.2, ptr
; CHECK-NEXT: [[TMP48:%.*]] = getelementptr i64, ptr [[TMP45]], i64 [[TMP47]]
; CHECK-NEXT: store <vscale x 2 x i64> [[TMP35]], ptr [[TMP45]], align 8
; CHECK-NEXT: store <vscale x 2 x i64> [[TMP36]], ptr [[TMP48]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP50]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
; CHECK-NEXT: [[TMP51:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP51]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
index 1cda568..10fe67d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
@@ -46,12 +46,6 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll
index fb0447b..ed2c5cd 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll
@@ -10,12 +10,6 @@ define void @trip1025_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapt
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -28,7 +22,7 @@ define void @trip1025_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapt
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP11]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; CHECK-NEXT: [[TMP13:%.*]] = add nsw <vscale x 2 x i64> [[WIDE_MASKED_LOAD1]], [[TMP10]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP13]], ptr [[TMP11]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; CHECK-NEXT: [[TMP14:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 2 x i1> [[TMP14]], i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
index d6f8b8e..6b1b04a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
@@ -12,12 +12,6 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -65,12 +59,6 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-IN-LOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-IN-LOOP: vector.ph:
-; CHECK-IN-LOOP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-IN-LOOP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-IN-LOOP-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-IN-LOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
-; CHECK-IN-LOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-IN-LOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-IN-LOOP-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
;
CHECK-IN-LOOP-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 4 ; CHECK-IN-LOOP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() @@ -136,12 +124,6 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1) ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() @@ -188,12 +170,6 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 { ; CHECK-IN-LOOP-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1) ; CHECK-IN-LOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-IN-LOOP: vector.ph: -; CHECK-IN-LOOP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-IN-LOOP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-IN-LOOP-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-IN-LOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]] -; CHECK-IN-LOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-IN-LOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-IN-LOOP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-IN-LOOP-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4 ; CHECK-IN-LOOP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() @@ -257,17 +233,11 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP4]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP22:%.*]] = mul nuw i64 [[TMP21]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 -; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] +; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N:%.*]], [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] ; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) @@ -322,17 +292,11 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 { ; CHECK-IN-LOOP-NEXT: entry: ; CHECK-IN-LOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-IN-LOOP: vector.ph: -; CHECK-IN-LOOP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-IN-LOOP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-IN-LOOP-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-IN-LOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP4]] -; CHECK-IN-LOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-IN-LOOP-NEXT: [[N_VEC:%.*]] = sub i64 
[[N_RND_UP]], [[N_MOD_VF]] ; CHECK-IN-LOOP-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-IN-LOOP-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 4 ; CHECK-IN-LOOP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-IN-LOOP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 -; CHECK-IN-LOOP-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] +; CHECK-IN-LOOP-NEXT: [[TMP7:%.*]] = sub i64 [[N:%.*]], [[TMP6]] ; CHECK-IN-LOOP-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] ; CHECK-IN-LOOP-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 ; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll index 4ec7d4d..01b864b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll @@ -10,12 +10,6 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1) ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 -; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP61:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP62:%.*]] = mul nuw i64 [[TMP61]], 16 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() @@ -101,12 +95,6 @@ define void @cond_memset(i32 %val, ptr noalias readonly %cond_ptr, ptr noalias % ; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1) ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 -; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP83:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP84:%.*]] = mul nuw i64 [[TMP83]], 16 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll index 672523e..e996535 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll @@ -12,12 +12,6 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: 
[[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]] @@ -32,7 +26,7 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]] ; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP13:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[TMP13]], i32 0 @@ -63,9 +57,6 @@ define void @simple_memset_v4i32(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1) ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], 3 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[UMAX]], 4 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[UMAX]], 4 ; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i64 [[TMP0]], i64 0 @@ -111,12 +102,6 @@ define void @simple_memcpy(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]] @@ -131,7 +116,7 @@ define void @simple_memcpy(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison) ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[DST:%.*]], i64 [[INDEX1]] ; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]], ptr [[TMP13]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP15:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-NEXT: [[TMP16:%.*]] = extractelement <vscale x 4 x i1> [[TMP15]], i32 0 @@ -169,12 +154,6 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 -; CHECK-NEXT: 
[[TMP5:%.*]] = sub i64 [[TMP4]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP2]], [[TMP5]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 ; CHECK-NEXT: [[TMP10:%.*]] = sub i64 [[TMP2]], [[TMP9]] @@ -184,7 +163,7 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; CHECK-NEXT: [[TMP15:%.*]] = mul <vscale x 4 x i64> [[TMP13]], splat (i64 4) ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP15]] -; CHECK-NEXT: [[TMP18:%.*]] = mul i64 4, [[TMP7]] +; CHECK-NEXT: [[TMP18:%.*]] = mul i64 4, [[TMP4]] ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP18]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -196,7 +175,7 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison) ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[DST:%.*]], <vscale x 4 x i64> [[VEC_IND]] ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[WIDE_MASKED_GATHER]], <vscale x 4 x ptr> [[TMP20]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP7]] +; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP12]]) ; CHECK-NEXT: [[TMP21:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] @@ -232,12 +211,6 @@ define void @simple_gather_scatter(ptr noalias %dst, ptr noalias %src, ptr noali ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]] @@ -254,7 +227,7 @@ define void @simple_gather_scatter(ptr noalias %dst, ptr noalias %src, ptr noali ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP13]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison) ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[DST:%.*]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]] ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[WIDE_MASKED_GATHER]], <vscale x 4 x ptr> [[TMP14]], i32 
4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP15:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-NEXT: [[TMP16:%.*]] = extractelement <vscale x 4 x i1> [[TMP15]], i32 0 @@ -292,15 +265,9 @@ define void @uniform_load(ptr noalias %dst, ptr noalias readonly %src, i64 %n) # ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 -; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] +; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N:%.*]], [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] ; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) @@ -313,7 +280,7 @@ define void @uniform_load(ptr noalias %dst, ptr noalias readonly %src, i64 %n) # ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[INDEX]] ; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP14:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 4 x i1> [[TMP14]], i32 0 @@ -351,15 +318,9 @@ define void @cond_uniform_load(ptr noalias %dst, ptr noalias readonly %src, ptr ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 -; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] +; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N:%.*]], [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] ; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) @@ -377,7 +338,7 @@ 
define void @cond_uniform_load(ptr noalias %dst, ptr noalias readonly %src, ptr ; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[INDEX1]] ; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[PREDPHI]], ptr [[TMP16]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP18:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-NEXT: [[TMP19:%.*]] = extractelement <vscale x 4 x i1> [[TMP18]], i32 0 @@ -423,15 +384,9 @@ define void @uniform_store(ptr noalias %dst, ptr noalias readonly %src, i64 %n) ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 -; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] +; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N:%.*]], [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] ; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) @@ -444,7 +399,7 @@ define void @uniform_store(ptr noalias %dst, ptr noalias readonly %src, i64 %n) ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison) ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP13:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[TMP13]], i32 0 @@ -479,12 +434,6 @@ define void @simple_fdiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = 
call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]] @@ -501,7 +450,7 @@ define void @simple_fdiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison) ; CHECK-NEXT: [[TMP15:%.*]] = fdiv <vscale x 4 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD2]] ; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP15]], ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT3]] = add i64 [[INDEX1]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT3]] = add i64 [[INDEX1]], [[TMP1]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP16:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[TMP16]], i32 0 @@ -539,12 +488,6 @@ define void @simple_idiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]] @@ -562,7 +505,7 @@ define void @simple_idiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD2]], <vscale x 4 x i32> splat (i32 1) ; CHECK-NEXT: [[TMP16:%.*]] = udiv <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], [[TMP15]] ; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP16]], ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT3]] = add i64 [[INDEX1]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT3]] = add i64 [[INDEX1]], [[TMP1]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP17:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-NEXT: [[TMP18:%.*]] = extractelement <vscale x 4 x i1> [[TMP17]], i32 0 @@ -601,8 +544,6 @@ define void @simple_memset_trip1024(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -610,7 +551,7 @@ define void @simple_memset_trip1024(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: 
[[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]] ; CHECK-NEXT: store <vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 4 -; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP3]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll index 33fa360..9989209 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll @@ -19,13 +19,11 @@ define void @vector_reverse_f64(i64 %N, ptr noalias %a, ptr noalias %b) #0{ ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 3 ; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP4]], 4 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -98,13 +96,11 @@ define void @vector_reverse_i64(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] ; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP8:%.*]] = shl nuw i64 [[TMP7]], 3 ; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[TMP7]], 4 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP9]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll index 352f4fe..db941a3 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll @@ -15,8 +15,6 @@ define void @vscale_mul_4(ptr noalias noundef readonly captures(none) %a, ptr no ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 ; CHECK-NEXT: 
[[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[A]], align 4 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[B]], align 4 ; CHECK-NEXT: [[TMP10:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]] @@ -70,8 +68,6 @@ define void @vscale_mul_8(ptr noalias noundef readonly captures(none) %a, ptr n ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[MUL1]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8 ; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP10]] @@ -141,8 +137,6 @@ define void @vscale_mul_12(ptr noalias noundef readonly captures(none) %a, ptr n ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[MUL1]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -152,7 +146,7 @@ define void @vscale_mul_12(ptr noalias noundef readonly captures(none) %a, ptr n ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP9]], align 4 ; CHECK-NEXT: [[TMP11:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]] ; CHECK-NEXT: store <vscale x 4 x float> [[TMP11]], ptr [[TMP9]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -211,8 +205,6 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[MUL1]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -235,7 +227,7 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP20]] ; CHECK-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP12]], align 4 ; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP21]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -294,8 +286,6 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 
[[MUL1]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -318,7 +308,7 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP20]] ; CHECK-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP12]], align 4 ; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP21]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -379,8 +369,6 @@ define void @trip_count_with_overflow(ptr noalias noundef readonly captures(none ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -403,7 +391,7 @@ define void @trip_count_with_overflow(ptr noalias noundef readonly captures(none ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]] ; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4 ; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -460,8 +448,6 @@ define void @trip_count_too_big_for_element_count(ptr noalias noundef readonly c ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -484,7 +470,7 @@ define void @trip_count_too_big_for_element_count(ptr noalias noundef readonly c ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]] ; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4 ; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll 
b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll index e214e82..b007db9 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll @@ -16,8 +16,6 @@ define void @widen_extractvalue(ptr %dst, {i64, i64} %sv) #0 { ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 1000, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 1000, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 2 ; CHECK-NEXT: [[EXTRACT0:%.*]] = extractvalue { i64, i64 } [[SV]], 0 ; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[EXTRACT0]], i64 0 ; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer @@ -30,7 +28,7 @@ define void @widen_extractvalue(ptr %dst, {i64, i64} %sv) #0 { ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[DST]], i32 [[INDEX]] ; CHECK-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll index 5c6328e..1012c10 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll @@ -26,8 +26,6 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2 ; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[N_VEC]], 8 ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START_1:%.*]], i64 [[TMP4]] ; CHECK-NEXT: [[IND_END2:%.*]] = getelementptr i8, ptr [[START_2:%.*]], i64 [[N_VEC]] @@ -35,19 +33,19 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() -; CHECK-NEXT: [[TMP15:%.*]] = mul <vscale x 2 x i64> [[TMP13]], splat (i64 1) -; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP15]] +; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() +; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 1) +; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP8]] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, 
<vscale x 2 x ptr> [[VECTOR_GEP]], i64 1 -; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP16]], ptr [[NEXT_GEP]], align 8 -; CHECK-NEXT: [[TMP18:%.*]] = extractelement <vscale x 2 x ptr> [[VECTOR_GEP]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP18]], align 1 -; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1) -; CHECK-NEXT: store <vscale x 2 x i8> [[TMP20]], ptr [[TMP18]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] -; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP6]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, <vscale x 2 x ptr> [[VECTOR_GEP]], i64 1 +; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP9]], ptr [[NEXT_GEP]], align 8 +; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 2 x ptr> [[VECTOR_GEP]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP10]], align 1 +; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1) +; CHECK-NEXT: store <vscale x 2 x i8> [[TMP12]], ptr [[TMP10]], align 1 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP3]] ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP11]] ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] @@ -112,8 +110,6 @@ define void @pointer_induction(ptr noalias %start, i64 %N) { ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2 ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START:%.*]], i64 [[N_VEC]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -126,8 +122,8 @@ define void @pointer_induction(ptr noalias %start, i64 %N) { ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP15]], align 1 ; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1) ; CHECK-NEXT: store <vscale x 2 x i8> [[TMP17]], ptr [[TMP15]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX2]], [[TMP6]] -; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX2]], [[TMP4]] +; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP4]] ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP10]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll index 2c88e0e..11eef23 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll @@ -22,11 +22,10 @@ define void @widen_ptr_phi_unrolled(ptr noalias nocapture %a, ptr noalias nocapt ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 
@llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 3 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: [[TMP26:%.*]] = shl i64 [[N_VEC]], 3 ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[TMP26]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -134,11 +133,10 @@ define void @widen_2ptrs_phi_unrolled(ptr noalias nocapture %dst, ptr noalias no ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 3 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP6]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[N_VEC]], 2 ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[TMP3]] ; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[N_VEC]], 2 @@ -225,11 +223,10 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 { ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i64 [[SMAX]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -2 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[SMAX]], [[DOTNEG]] ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 1 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP6]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[SMAX]], [[DOTNOT]] ; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[N_VEC]], 2 ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP3]] ; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[N_VEC]], 3 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll index 5848d31..c54511e 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll @@ -22,8 +22,6 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 % ; CHECK-NEXT: [[TMP7:%.*]] = shl nuw i64 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP7]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -32,7 +30,7 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 % ; CHECK-NEXT: [[TMP14:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD1]] to <vscale x 4 x i64> ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP14]] ; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP15]], i32 1, <vscale x 4 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; CHECK-NEXT: 
[[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: @@ -49,8 +47,6 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 % ; CHECK-NEXT: [[TMP25:%.*]] = shl nuw i64 [[TMP24]], 1 ; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[N]], [[TMP25]] ; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 [[N]], [[N_MOD_VF2]] -; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP17:%.*]] = shl nuw i64 [[TMP16]], 1 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: vec.epilog.vector.body: ; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[FOR_BODY]] ] @@ -59,7 +55,7 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 % ; CHECK-NEXT: [[TMP19:%.*]] = zext <vscale x 2 x i32> [[WIDE_LOAD5]] to <vscale x 2 x i64> ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 2 x i64> [[TMP19]] ; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv2p0.i32(<vscale x 2 x ptr> [[TMP20]], i32 1, <vscale x 2 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX4]], [[TMP17]] +; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX4]], [[TMP25]] ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC3]] ; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: vec.epilog.middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll index abee8b9..baf050c 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll @@ -70,10 +70,8 @@ define void @many_deps(ptr noalias %buckets, ptr %array, ptr %indices, ptr %othe ; NORMAL_DEP_LIMIT-NEXT: [[TMP8:%.*]] = shl nuw i64 [[TMP4]], 2 ; NORMAL_DEP_LIMIT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]] ; NORMAL_DEP_LIMIT-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NORMAL_DEP_LIMIT-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; NORMAL_DEP_LIMIT-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 2 ; NORMAL_DEP_LIMIT-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() -; NORMAL_DEP_LIMIT-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP6]] to i32 +; NORMAL_DEP_LIMIT-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32 ; NORMAL_DEP_LIMIT-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP9]], i64 0 ; NORMAL_DEP_LIMIT-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; NORMAL_DEP_LIMIT-NEXT: br label [[FOR_BODY:%.*]] @@ -91,7 +89,7 @@ define void @many_deps(ptr noalias %buckets, ptr %array, ptr %indices, ptr %othe ; NORMAL_DEP_LIMIT-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4, !alias.scope [[META7:![0-9]+]], !noalias [[META0]] ; NORMAL_DEP_LIMIT-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD10]], [[VEC_IND]] ; NORMAL_DEP_LIMIT-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP14]], align 4, !alias.scope [[META7]], !noalias [[META0]] -; NORMAL_DEP_LIMIT-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP6]] +; NORMAL_DEP_LIMIT-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP8]] ; NORMAL_DEP_LIMIT-NEXT: [[VEC_IND_NEXT]] = 
add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]] ; NORMAL_DEP_LIMIT-NEXT: [[TMP16:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] ; NORMAL_DEP_LIMIT-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll index 9257e45..3b19e9ee 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll @@ -36,11 +36,10 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 % ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -86,11 +85,10 @@ define void @simple_histogram_inc_param(ptr noalias %buckets, ptr readonly %indi ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -136,11 +134,10 @@ define void @simple_histogram_sub(ptr noalias %buckets, ptr readonly %indices, i ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -186,11 +183,10 @@ define void @conditional_histogram(ptr noalias %buckets, ptr readonly %indices, ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP3]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP4:%.*]] = 
call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] @@ -248,11 +244,10 @@ define void @histogram_8bit(ptr noalias %buckets, ptr readonly %indices, i64 %N) ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP9]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] @@ -385,11 +380,10 @@ define void @simple_histogram_user_interleave(ptr noalias %buckets, ptr readonly ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 3 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -446,11 +440,10 @@ define void @histogram_array_3op_gep(i64 noundef %N) #0 { ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -500,11 +493,10 @@ define void @histogram_array_4op_gep_nonzero_const_idx(i64 noundef %N, ptr reado ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: br label 
[[FOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] @@ -611,11 +603,10 @@ define void @simple_histogram_rtdepcheck(ptr noalias %buckets, ptr %array, ptr % ; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP5]], [[TMP4]] ; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP6]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP8]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() ; CHECK-NEXT: [[TMP11:%.*]] = trunc nuw nsw i64 [[TMP8]] to i32 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0 @@ -714,11 +705,10 @@ define void @simple_histogram_64b(ptr noalias %buckets, ptr readonly %indices, i ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -2 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 1 +; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll index 124abc6..cdd41a0 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll @@ -22,8 +22,6 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; NONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], [[TMP3]] ; NONE-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]] -; NONE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; NONE-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; NONE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0 ; NONE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; NONE-NEXT: br label [[VECTOR_BODY:%.*]] @@ -31,7 +29,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; NONE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ] ; NONE-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]] ; NONE-NEXT: store <vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 4 -; NONE-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP8]] +; NONE-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP3]] ; NONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]] ; NONE-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP0:![0-9]+]] ; NONE: middle.block: @@ -61,8 +59,6 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP8]] ; DATA-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] ; DATA-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; DATA-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() -; DATA-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4 ; DATA-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0 ; DATA-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; DATA-NEXT: br label [[VECTOR_BODY:%.*]] @@ -71,7 +67,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[UMAX]]) ; DATA-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]] ; DATA-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; DATA-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP13]] +; DATA-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP5]] ; DATA-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]] ; DATA-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DATA: middle.block: @@ -100,8 +96,6 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_NO_LANEMASK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP8]] ; DATA_NO_LANEMASK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] ; DATA_NO_LANEMASK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; DATA_NO_LANEMASK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; DATA_NO_LANEMASK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4 ; DATA_NO_LANEMASK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[UMAX]], 1 ; DATA_NO_LANEMASK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 ; DATA_NO_LANEMASK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer @@ -118,7 +112,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_NO_LANEMASK-NEXT: [[TMP12:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT]] ; DATA_NO_LANEMASK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]] ; DATA_NO_LANEMASK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT5]], ptr [[TMP13]], i32 4, <vscale x 4 x i1> [[TMP12]]) -; DATA_NO_LANEMASK-NEXT: [[INDEX_NEXT6]] = add i64 [[INDEX1]], [[TMP16]] +; DATA_NO_LANEMASK-NEXT: [[INDEX_NEXT6]] = add i64 [[INDEX1]], [[TMP5]] ; DATA_NO_LANEMASK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC]] ; DATA_NO_LANEMASK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DATA_NO_LANEMASK: middle.block: @@ -143,12 +137,6 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_AND_CONTROL: vector.ph: ; DATA_AND_CONTROL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; DATA_AND_CONTROL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; DATA_AND_CONTROL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP5]], 1 -; 
DATA_AND_CONTROL-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP8]] -; DATA_AND_CONTROL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; DATA_AND_CONTROL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; DATA_AND_CONTROL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() -; DATA_AND_CONTROL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4 ; DATA_AND_CONTROL-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[UMAX]]) ; DATA_AND_CONTROL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0 ; DATA_AND_CONTROL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer @@ -158,7 +146,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_AND_CONTROL-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; DATA_AND_CONTROL-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]] ; DATA_AND_CONTROL-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; DATA_AND_CONTROL-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP13]] +; DATA_AND_CONTROL-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP5]] ; DATA_AND_CONTROL-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT2]], i64 [[UMAX]]) ; DATA_AND_CONTROL-NEXT: [[TMP14:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; DATA_AND_CONTROL-NEXT: [[TMP15:%.*]] = extractelement <vscale x 4 x i1> [[TMP14]], i32 0 @@ -185,12 +173,6 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_AND_CONTROL_NO_RT_CHECK: vector.ph: ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1 -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]] -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 4 ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]] @@ -205,7 +187,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]] ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP14]] +; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: 
[[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]] ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP15:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP16:%.*]] = extractelement <vscale x 4 x i1> [[TMP15]], i32 0 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll index bba9293..4ed9580 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll @@ -73,7 +73,6 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) { ; CHECK-NEXT: } ; CHECK: VPlan 'Final VPlan for VF={8,16},UF={1}' { ; CHECK-NEXT: Live-in ir<[[EP_VFxUF:.+]]> = VF * UF -; CHECK-NEXT: Live-in ir<[[EP_VEC_TC:.+]]> = vector-trip-count ; CHECK-NEXT: Live-in ir<1024> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: @@ -83,7 +82,9 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) { ; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.ph> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<vector.ph>: -; CHECK-NEXT: EMIT vp<[[RDX_START:%.+]]> = reduction-start-vector ir<0>, ir<0>, ir<4> +; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<1024>, ir<16> +; CHECK-NEXT: EMIT vp<[[VEC_TC:%.+]]> = sub ir<1024>, vp<%n.mod.vf> +; CHECK-NEXT: EMIT vp<[[RDX_START:%.+]]> = reduction-start-vector ir<0>, ir<0>, ir<4> ; CHECK-NEXT: Successor(s): vector.body ; CHECK-EMPTY: ; CHECK-NEXT: vector.body: @@ -98,12 +99,12 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) { ; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a> ; CHECK-NEXT: PARTIAL-REDUCE ir<%add> = add ir<%accum>, ir<%mul> ; CHECK-NEXT: EMIT vp<[[EP_IV_NEXT:%.+]]> = add nuw vp<[[EP_IV]]>, ir<16> -; CHECK-NEXT: EMIT branch-on-count vp<[[EP_IV_NEXT]]>, ir<1024> +; CHECK-NEXT: EMIT branch-on-count vp<[[EP_IV_NEXT]]>, vp<[[VEC_TC]]> ; CHECK-NEXT: Successor(s): middle.block, vector.body ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: ; CHECK-NEXT: EMIT vp<[[RED_RESULT:%.+]]> = compute-reduction-result ir<%accum>, ir<%add> -; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq ir<1024>, ir<1024> +; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq ir<1024>, vp<[[VEC_TC]]> ; CHECK-NEXT: EMIT branch-on-cond vp<[[CMP]]> ; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph> ; CHECK-EMPTY: @@ -112,7 +113,7 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<scalar.ph>: -; CHECK-NEXT: EMIT-SCALAR vp<[[EP_RESUME:%.+]]> = phi [ ir<1024>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<[[EP_RESUME:%.+]]> = phi [ vp<[[VEC_TC]]>, middle.block ], [ ir<0>, ir-bb<entry> ] ; CHECK-NEXT: EMIT-SCALAR vp<[[EP_MERGE:%.+]]> = phi [ vp<[[RED_RESULT]]>, middle.block ], [ ir<0>, ir-bb<entry> ] ; CHECK-NEXT: Successor(s): ir-bb<for.body> ; CHECK-EMPTY: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll index c1d4317..9edd6ce 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll @@ -13,8 +13,6 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #1 { ; WIDE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; WIDE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; 
WIDE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; WIDE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; WIDE-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 ; WIDE-NEXT: br label [[VECTOR_BODY:%.*]] ; WIDE: vector.body: ; WIDE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -24,7 +22,7 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #1 { ; WIDE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x float> @foo_vector(<vscale x 4 x float> [[TMP5]], <vscale x 4 x i1> splat (i1 true)) ; WIDE-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] ; WIDE-NEXT: store <vscale x 4 x float> [[TMP6]], ptr [[TMP7]], align 4 -; WIDE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] +; WIDE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; WIDE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; WIDE-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; WIDE: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll index f4102ff..fe3504b 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll @@ -239,8 +239,8 @@ for.body: ; preds = %entry, %for.body define dso_local i32 @predicated_test(i32 noundef %0, ptr %glob) #0 { %2 = alloca [101 x i32], align 4 %3 = alloca [21 x i32], align 4 - call void @llvm.lifetime.start.p0(i64 404, ptr nonnull %2) - call void @llvm.lifetime.start.p0(i64 84, ptr nonnull %3) + call void @llvm.lifetime.start.p0(ptr nonnull %2) + call void @llvm.lifetime.start.p0(ptr nonnull %3) %4 = icmp sgt i32 %0, 0 br i1 %4, label %5, label %159 @@ -433,8 +433,8 @@ define dso_local i32 @predicated_test(i32 noundef %0, ptr %glob) #0 { br label %159 159: ; preds = %158, %1 - call void @llvm.lifetime.end.p0(i64 84, ptr nonnull %3) - call void @llvm.lifetime.end.p0(i64 404, ptr nonnull %2) + call void @llvm.lifetime.end.p0(ptr nonnull %3) + call void @llvm.lifetime.end.p0(ptr nonnull %2) ret i32 0 } @@ -472,7 +472,7 @@ while.end: ; preds = %while.end.loopexit, } -declare void @llvm.lifetime.start.p0(i64, ptr) -declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.start.p0(ptr) +declare void @llvm.lifetime.end.p0(ptr) attributes #0 = { "target-features"="+mve.fp" } diff --git a/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll b/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll index 1ac556a..26bab4d 100644 --- a/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll +++ b/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll @@ -11,14 +11,14 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i @g0 = external dso_local local_unnamed_addr global ptr, align 4 -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0 -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0 +declare void @llvm.lifetime.start.p0(ptr nocapture) #0 +declare void @llvm.lifetime.end.p0(ptr nocapture) #0 ; Function Attrs: nounwind define hidden fastcc void @f0(ptr nocapture %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i8 zeroext %a5) unnamed_addr #1 { b0: %v0 = alloca [4 x [9 x i16]], align 8 - call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %v0) #2 + call void @llvm.lifetime.start.p0(ptr nonnull %v0) #2 %v2 = add i32 %a1, -2 %v3 = add i32 %a3, -9 %v4 = icmp ugt i32 %v2, %v3 @@ -147,7 +147,7 @@ b1: ; preds = %b1, %b0 
br i1 %v120, label %b2, label %b1 b2: ; preds = %b1, %b0 - call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %v0) #2 + call void @llvm.lifetime.end.p0(ptr nonnull %v0) #2 ret void } diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll index 1bacb57..6b72f20 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll @@ -10,11 +10,11 @@ define zeroext i32 @test() #0 { entry: %a = alloca [1600 x i32], align 4 %c = alloca [1600 x i32], align 4 - call void @llvm.lifetime.start(i64 6400, ptr %a) #3 + call void @llvm.lifetime.start(ptr %a) #3 br label %for.body for.cond.cleanup: ; preds = %for.body - call void @llvm.lifetime.start(i64 6400, ptr %c) #3 + call void @llvm.lifetime.start(ptr %c) #3 %call = call signext i32 @bar(ptr %a, ptr %c) #3 br label %for.body6 @@ -28,8 +28,8 @@ for.body: ; preds = %for.body, %entry br i1 %exitcond27, label %for.cond.cleanup, label %for.body for.cond.cleanup5: ; preds = %for.body6 - call void @llvm.lifetime.end(i64 6400, ptr nonnull %c) #3 - call void @llvm.lifetime.end(i64 6400, ptr %a) #3 + call void @llvm.lifetime.end(ptr nonnull %c) #3 + call void @llvm.lifetime.end(ptr %a) #3 ret i32 %add for.body6: ; preds = %for.body6, %for.cond.cleanup @@ -44,10 +44,10 @@ for.body6: ; preds = %for.body6, %for.con } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start(ptr nocapture) #1 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end(ptr nocapture) #1 declare signext i32 @bar(ptr, ptr) #2 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll index 7b56ba8..f3e0a5a 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll @@ -26,37 +26,33 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { ; ZVFBFMIN-LABEL: define void @fadd( ; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { ; ZVFBFMIN-NEXT: [[ENTRY:.*]]: -; ZVFBFMIN-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; ZVFBFMIN-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8 -; ZVFBFMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP8]] -; ZVFBFMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; ZVFBFMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; ZVFBFMIN: [[VECTOR_PH]]: -; ZVFBFMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; ZVFBFMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8 -; ZVFBFMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]] -; ZVFBFMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; ZVFBFMIN-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() ; ZVFBFMIN-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP12]], 8 ; ZVFBFMIN-NEXT: br label %[[VECTOR_BODY:.*]] ; ZVFBFMIN: [[VECTOR_BODY]]: -; ZVFBFMIN-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFBFMIN-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFBFMIN-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFBFMIN-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) ; 
ZVFBFMIN-NEXT: [[TMP1:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[TMP0]] ; ZVFBFMIN-NEXT: [[TMP2:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[TMP0]] -; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP1]], align 2 -; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP2]], align 2 +; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]]) +; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]]) ; ZVFBFMIN-NEXT: [[TMP11:%.*]] = fadd <vscale x 8 x bfloat> [[WIDE_LOAD]], [[WIDE_LOAD1]] -; ZVFBFMIN-NEXT: store <vscale x 8 x bfloat> [[TMP11]], ptr [[TMP1]], align 2 -; ZVFBFMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], [[TMP5]] -; ZVFBFMIN-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; ZVFBFMIN-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; ZVFBFMIN-NEXT: call void @llvm.vp.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[TMP11]], ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]]) +; ZVFBFMIN-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64 +; ZVFBFMIN-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[TMP0]] +; ZVFBFMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; ZVFBFMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] +; ZVFBFMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; ZVFBFMIN: [[MIDDLE_BLOCK]]: -; ZVFBFMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; ZVFBFMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; ZVFBFMIN-NEXT: br label %[[EXIT:.*]] ; ZVFBFMIN: [[SCALAR_PH]]: -; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; ZVFBFMIN-NEXT: br label %[[LOOP:.*]] ; ZVFBFMIN: [[LOOP]]: -; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] +; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] ; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]] ; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]] ; ZVFBFMIN-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2 @@ -65,7 +61,7 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { ; ZVFBFMIN-NEXT: store bfloat [[Z]], ptr [[A_GEP]], align 2 ; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1 ; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] -; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; ZVFBFMIN: [[EXIT]]: ; ZVFBFMIN-NEXT: ret void ; @@ -138,41 +134,37 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 ; ZVFBFMIN-LABEL: define void @vfwmaccbf16.vv( ; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; ZVFBFMIN-NEXT: [[ENTRY:.*]]: -; ZVFBFMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; ZVFBFMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; ZVFBFMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] -; ZVFBFMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label 
%[[VECTOR_PH:.*]] +; ZVFBFMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; ZVFBFMIN: [[VECTOR_PH]]: -; ZVFBFMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; ZVFBFMIN-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; ZVFBFMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; ZVFBFMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; ZVFBFMIN-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; ZVFBFMIN-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; ZVFBFMIN-NEXT: br label %[[VECTOR_BODY:.*]] ; ZVFBFMIN: [[VECTOR_BODY]]: -; ZVFBFMIN-NEXT: [[TMP6:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFBFMIN-NEXT: [[TMP6:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFBFMIN-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFBFMIN-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; ZVFBFMIN-NEXT: [[TMP7:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[TMP6]] ; ZVFBFMIN-NEXT: [[TMP8:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[TMP6]] ; ZVFBFMIN-NEXT: [[TMP9:%.*]] = getelementptr float, ptr [[C]], i64 [[TMP6]] -; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x bfloat>, ptr [[TMP7]], align 2 -; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x bfloat>, ptr [[TMP8]], align 2 -; ZVFBFMIN-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP9]], align 4 +; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x bfloat> @llvm.vp.load.nxv4bf16.p0(ptr align 2 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) +; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 4 x bfloat> @llvm.vp.load.nxv4bf16.p0(ptr align 2 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) +; ZVFBFMIN-NEXT: [[WIDE_LOAD2:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) ; ZVFBFMIN-NEXT: [[TMP13:%.*]] = fpext <vscale x 4 x bfloat> [[WIDE_LOAD]] to <vscale x 4 x float> ; ZVFBFMIN-NEXT: [[TMP14:%.*]] = fpext <vscale x 4 x bfloat> [[WIDE_LOAD1]] to <vscale x 4 x float> ; ZVFBFMIN-NEXT: [[TMP15:%.*]] = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[TMP13]], <vscale x 4 x float> [[TMP14]], <vscale x 4 x float> [[WIDE_LOAD2]]) -; ZVFBFMIN-NEXT: store <vscale x 4 x float> [[TMP15]], ptr [[TMP9]], align 4 -; ZVFBFMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP6]], [[TMP5]] -; ZVFBFMIN-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; ZVFBFMIN-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; ZVFBFMIN-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[TMP15]], ptr align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) +; ZVFBFMIN-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 +; ZVFBFMIN-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP12]], [[TMP6]] +; ZVFBFMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] +; ZVFBFMIN-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] +; ZVFBFMIN-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; ZVFBFMIN: [[MIDDLE_BLOCK]]: -; ZVFBFMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; ZVFBFMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; ZVFBFMIN-NEXT: br label %[[EXIT:.*]] ; ZVFBFMIN: [[SCALAR_PH]]: -; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, 
%[[ENTRY]] ] +; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; ZVFBFMIN-NEXT: br label %[[LOOP:.*]] ; ZVFBFMIN: [[LOOP]]: -; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] +; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] ; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]] ; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]] ; ZVFBFMIN-NEXT: [[C_GEP:%.*]] = getelementptr float, ptr [[C]], i64 [[I]] @@ -185,7 +177,7 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 ; ZVFBFMIN-NEXT: store float [[FMULADD]], ptr [[C_GEP]], align 4 ; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1 ; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] -; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] +; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] ; ZVFBFMIN: [[EXIT]]: ; ZVFBFMIN-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll index 75ae6df..9f7ac7a 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll @@ -62,50 +62,10 @@ define i32 @any_of_reduction_used_in_blend_with_multiple_phis(ptr %src, i64 %N, ; CHECK-LABEL: define i32 @any_of_reduction_used_in_blend_with_multiple_phis( ; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]], i1 [[C_0:%.*]], i1 [[C_1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] -; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 -; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C_1]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C_0]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP6:%.*]] = xor <vscale x 2 x i1> [[BROADCAST_SPLAT]], splat (i1 true) -; CHECK-NEXT: [[TMP7:%.*]] = xor <vscale x 2 x i1> [[BROADCAST_SPLAT2]], splat (i1 true) -; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i1> [[TMP7]], <vscale x 2 x i1> zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[SRC]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer -; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] -; CHECK: [[VECTOR_BODY]]: -; 
CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PREDPHI:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x ptr> @llvm.masked.gather.nxv2p0.nxv2p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT4]], i32 8, <vscale x 2 x i1> [[TMP8]], <vscale x 2 x ptr> poison)
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 2 x ptr> [[WIDE_MASKED_GATHER]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = or <vscale x 2 x i1> [[VEC_PHI]], [[TMP9]]
-; CHECK-NEXT: [[PREDPHI]] = select <vscale x 2 x i1> [[TMP8]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.nxv2i1(<vscale x 2 x i1> [[PREDPHI]])
-; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP13]], i32 0, i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[ANY_OF_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ANY_OF_RED_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[ANY_OF_RED:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ANY_OF_RED_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: br i1 [[C_0]], label %[[X_1:.*]], label %[[ELSE_1:.*]]
; CHECK: [[ELSE_1]]:
; CHECK-NEXT: br i1 [[C_1]], label %[[X_1]], label %[[ELSE_2:.*]]
@@ -121,9 +81,9 @@ define i32 @any_of_reduction_used_in_blend_with_multiple_phis(ptr %src, i64 %N,
; CHECK-NEXT: [[ANY_OF_RED_NEXT]] = phi i32 [ [[P]], %[[X_1]] ], [ [[SEL]], %[[ELSE_2]] ]
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[ANY_OF_RED_NEXT]], %[[LOOP_LATCH]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[ANY_OF_RED_NEXT]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: ret i32 [[RES]]
;
entry:
@@ -159,9 +119,3 @@ exit:
}
attributes #0 = { "target-cpu"="sifive-p670" }
-;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
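NOTE: Several of the RISC-V diffs here (bf16.ll above, blocks-with-dead-instructions.ll below) switch the expected IR from a vscale-stepped vector loop with a scalar remainder to the EVL (explicit vector length) style: an active-vector-length phi feeds @llvm.experimental.get.vector.length, and memory is accessed through length-predicated vp.* intrinsics. What follows is a minimal hand-written sketch of that pattern, not taken from any test in this patch; the function @evl_memset_sketch and all value names are illustrative only.

declare i32 @llvm.experimental.get.vector.length.i64(i64, i32, i1)
declare void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16>, ptr, <vscale x 8 x i1>, i32)

define void @evl_memset_sketch(ptr %dst, i64 %n) {
entry:
  br label %vector.body

vector.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %vector.body ]
  %avl = phi i64 [ %n, %entry ], [ %avl.next, %vector.body ]
  ; EVL = how many of the remaining %avl elements this iteration handles,
  ; at most vscale x 8.
  %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 8, i1 true)
  %gep = getelementptr i16, ptr %dst, i64 %iv
  ; All-true mask: the store is bounded by %evl rather than by a tail mask.
  call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> zeroinitializer, ptr align 2 %gep, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  %evl.zext = zext i32 %evl to i64
  %iv.next = add i64 %iv, %evl.zext
  %avl.next = sub nuw i64 %avl, %evl.zext
  %done = icmp eq i64 %iv.next, %n
  br i1 %done, label %exit, label %vector.body

exit:
  ret void
}

Because the per-iteration EVLs sum to exactly %n, the latch can compare %iv.next against %n directly; this is why the N_MOD_VF/N_VEC round-down computation and the scalar epilogue resume values drop out of the updated checks.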
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
index aad9128..606b863 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
@@ -11,45 +11,40 @@ define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP17]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP12]])
+;
CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]] +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[SCALAR_PH]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] ; CHECK-NEXT: [[XOR]] = xor i16 0, 0 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] ; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 @@ -62,7 +57,7 @@ define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 { ; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 ; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[TMP25]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP25]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -96,45 +91,40 @@ define void @block_with_dead_inst_2(ptr %src) #0 { ; CHECK-LABEL: define void @block_with_dead_inst_2( ; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]] -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]] ; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 -; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3 -; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3) -; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x 
i64> zeroinitializer, [[TMP10]] -; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP6]], 8 +; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64() +; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 8 x i64> [[TMP5]], splat (i64 3) +; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP8]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]] -; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 333, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) +; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64 +; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP13]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]] +; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP9]]) +; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 333 +; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[SCALAR_PH]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, 
%[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] ; CHECK-NEXT: [[XOR]] = xor i16 0, 0 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] ; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 @@ -147,7 +137,7 @@ define void @block_with_dead_inst_2(ptr %src) #0 { ; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -181,45 +171,40 @@ define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 { ; CHECK-LABEL: define void @multiple_blocks_with_dead_insts_3( ; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]] -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]] ; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 -; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3 -; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3) -; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]] -; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP6]], 8 +; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64() +; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 8 x i64> [[TMP5]], splat (i64 3) +; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP8]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]] -; CHECK-NEXT: call void 
@llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 333, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) +; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64 +; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP13]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]] +; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP9]]) +; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 333 +; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[SCALAR_PH]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] ; CHECK-NEXT: [[XOR]] = xor i16 0, 0 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] ; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 @@ -235,7 +220,7 @@ define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 { ; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label 
%[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP8:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -276,45 +261,40 @@ define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 {
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
 ; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
 ; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
 ; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
 ; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
 ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP17]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
 ; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
 ; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
 ; CHECK-NEXT: [[XOR]] = xor i16 0, 0
 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
 ; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -332,7 +312,7 @@ define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 {
 ; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP10:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -373,45 +353,40 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 {
 ; CHECK-LABEL: define void @multiple_blocks_with_dead_inst_multiple_successors_5(
 ; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]]
 ; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP6]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 8 x i64> [[TMP5]], splat (i64 3)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP8]]
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 333, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 333
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
 ; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
 ; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
 ; CHECK-NEXT: [[XOR]] = xor i16 0, 0
 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
 ; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -431,7 +406,7 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 {
 ; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP12:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -478,45 +453,56 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
 ; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
 ; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i1> poison, i1 [[IC]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = xor <vscale x 8 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
 ; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
 ; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
 ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP27:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP27]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP27]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP12]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 8 x i32> [[TMP14]], [[BROADCAST_SPLAT4]]
 ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 8 x i16> @llvm.vp.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP27]])
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_MASKED_GATHER]], zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = select <vscale x 8 x i1> [[TMP15]], <vscale x 8 x i1> [[TMP17]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = select <vscale x 8 x i1> [[TMP18]], <vscale x 8 x i1> [[TMP8]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = xor <vscale x 8 x i1> [[TMP17]], splat (i1 true)
+; CHECK-NEXT: [[TMP21:%.*]] = select <vscale x 8 x i1> [[TMP15]], <vscale x 8 x i1> [[TMP28]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = or <vscale x 8 x i1> [[TMP19]], [[TMP21]]
+; CHECK-NEXT: [[TMP23:%.*]] = select <vscale x 8 x i1> [[TMP18]], <vscale x 8 x i1> [[BROADCAST_SPLAT]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = or <vscale x 8 x i1> [[TMP22]], [[TMP23]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> [[TMP24]], i32 [[TMP27]])
+; CHECK-NEXT: [[TMP25:%.*]] = zext i32 [[TMP27]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP25]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
 ; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
 ; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
 ; CHECK-NEXT: [[XOR]] = xor i16 0, 0
 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
 ; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -536,7 +522,7 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
 ; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP14:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -580,38 +566,39 @@ define void @empty_block_with_phi_1(ptr %src, i64 %N) #0 {
 ; CHECK-LABEL: define void @empty_block_with_phi_1(
 ; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
 ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ult <vscale x 8 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
 ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[TMP9]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP10]], align 2
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_LOAD]], zeroinitializer
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP12]], <vscale x 8 x i16> splat (i16 99), <vscale x 8 x i16> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 8 x i16> [[PREDPHI]], ptr [[TMP10]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP9]], [[TMP5]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <vscale x 8 x i16> [[VP_OP_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP15]], <vscale x 8 x i16> [[VP_OP_LOAD]], <vscale x 8 x i16> splat (i16 99)
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> [[PREDPHI]], ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[TMP9]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
 ; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 1, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
 ; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
 ; CHECK-NEXT: [[XOR]] = xor i32 0, 0
 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
 ; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -624,7 +611,7 @@ define void @empty_block_with_phi_1(ptr %src, i64 %N) #0 {
 ; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2
 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
 ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[TMP17]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP16:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -658,38 +645,39 @@ define void @empty_block_with_phi_2(ptr %src, i64 %N) #0 {
 ; CHECK-LABEL: define void @empty_block_with_phi_2(
 ; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
 ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ult <vscale x 8 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
 ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[TMP9]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP10]], align 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_LOAD]], zeroinitializer
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP12]], <vscale x 8 x i16> [[WIDE_LOAD]], <vscale x 8 x i16> splat (i16 99)
-; CHECK-NEXT: store <vscale x 8 x i16> [[PREDPHI]], ptr [[TMP10]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP9]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: [[TMP14:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x i1> [[TMP12]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i16> [[WIDE_LOAD]], <vscale x 8 x i16> splat (i16 99)
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> [[PREDPHI]], ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[TMP9]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
 ; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 1, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
 ; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
 ; CHECK-NEXT: [[XOR]] = xor i32 0, 0
 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
 ; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -702,7 +690,7 @@ define void @empty_block_with_phi_2(ptr %src, i64 %N) #0 {
 ; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2
 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
 ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[TMP18]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP18]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP18:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -743,11 +731,7 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
 ; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[UMIN7]], [[TMP1]]
 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 1
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.umax.i64(i64 40, i64 [[TMP5]])
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP6]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; CHECK: [[VECTOR_MEMCHECK]]:
 ; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[N_EXT]], i64 1)
 ; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N_EXT]], [[UMIN]]
@@ -770,34 +754,33 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
 ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT6]]
 ; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP15]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
 ; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 2
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 2 x i64> [[TMP18]], splat (i64 3)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP20]]
-; CHECK-NEXT: [[TMP23:%.*]] = mul i64 3, [[TMP17]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP23]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP16]], 4
+; CHECK-NEXT: [[TMP24:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; CHECK-NEXT: [[TMP25:%.*]] = mul <vscale x 4 x i64> [[TMP24]], splat (i64 3)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP25]]
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 2 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> zeroinitializer, <vscale x 2 x ptr> [[TMP24]], i32 4, <vscale x 2 x i1> splat (i1 true)), !alias.scope [[META18:![0-9]+]], !noalias [[META21:![0-9]+]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP17]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP3]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP18]] to i64
+; CHECK-NEXT: [[TMP23:%.*]] = mul i64 3, [[TMP17]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP23]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x ptr> align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP18]]), !alias.scope [[META19:![0-9]+]], !noalias [[META22:![0-9]+]]
+; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP18]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP3]]
+; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
 ; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
 ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
 ; CHECK: [[LOOP_HEADER]]:
 ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
@@ -813,7 +796,7 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
 ; CHECK-NEXT: store i32 0, ptr [[GEP_DST]], align 4
 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
 ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV]], [[N_EXT]]
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP26:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -845,30 +828,31 @@ exit:
 attributes #0 = { "target-features"="+64bit,+v" }
 ;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
 ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
-; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
-; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
-; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
-; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]}
-; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
-; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]}
-; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
-; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META2]], [[META1]]}
-; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
-; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META2]], [[META1]]}
-; CHECK: [[META18]] = !{[[META19:![0-9]+]]}
-; CHECK: [[META19]] = distinct !{[[META19]], [[META20:![0-9]+]]}
-; CHECK: [[META20]] = distinct !{[[META20]], !"LVerDomain"}
-; CHECK: [[META21]] = !{[[META22:![0-9]+]], [[META23:![0-9]+]]}
-; CHECK: [[META22]] = distinct !{[[META22]], [[META20]]}
-; CHECK: [[META23]] = distinct !{[[META23]], [[META20]]}
-; CHECK: [[LOOP24]] = distinct !{[[LOOP24]], [[META1]], [[META2]]}
-; CHECK: [[LOOP25]] = distinct !{[[LOOP25]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META3]], [[META1]]}
+; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META3]], [[META1]]}
+; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META3]], [[META1]]}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META3]], [[META1]]}
+; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META3]], [[META1]]}
+; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META3]], [[META1]]}
+; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META3]], [[META1]]}
+; CHECK: [[META19]] = !{[[META20:![0-9]+]]}
+; CHECK: [[META20]] = distinct !{[[META20]], [[META21:![0-9]+]]}
+; CHECK: [[META21]] = distinct !{[[META21]], !"LVerDomain"}
+; CHECK: [[META22]] = !{[[META23:![0-9]+]], [[META24:![0-9]+]]}
+; CHECK: [[META23]] = distinct !{[[META23]], [[META21]]}
+; CHECK: [[META24]] = distinct !{[[META24]], [[META21]]}
+; CHECK: [[LOOP25]] = distinct !{[[LOOP25]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP26]] = distinct !{[[LOOP26]], [[META1]]}
 ;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
index ab8875b..fcfd02b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
@@ -18,52 +18,46 @@ define void @dead_load(ptr %p, i16 %start) {
 ; CHECK-NEXT: [[TMP3:%.*]] = udiv i64 [[TMP2]], 3
 ; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[UMIN]], [[TMP3]]
 ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 1
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP5]], [[TMP7]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP5]], [[TMP9]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i64 [[TMP9]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP5]], [[TMP11]]
 ; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 8
-; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[START_EXT]], [[TMP18]]
 ; CHECK-NEXT: [[TMP15:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[START_EXT]], i64 0
 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP17:%.*]] = mul <vscale x 8 x i64> [[TMP15]], splat (i64 3)
 ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> [[DOTSPLAT]], [[TMP17]]
-; CHECK-NEXT: [[TMP20:%.*]] = mul i64 3, [[TMP14]]
-; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP20]], i64 0
-; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT1]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP5]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP16]] to i64
+; CHECK-NEXT: [[TMP20:%.*]] = mul i64 3, [[TMP19]]
+; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP20]], i64 0
+; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT1]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[P]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP21]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP21]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP16]])
+; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP16]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT2]]
-; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP5]]
+; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
 ; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[START_EXT]], %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[START_EXT]], %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP:.*]]
 ; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[START_EXT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[P]], i64 [[IV]]
 ; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2
 ; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV]], 111
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -94,7 +88,7 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
 ; CHECK-NEXT: [[ENTRY:.*]]:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
 ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.umax.i32(i32 8, i32 [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.umax.i32(i32 6, i32 [[TMP1]])
 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 252, [[TMP2]]
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; CHECK: [[VECTOR_MEMCHECK]]:
@@ -111,13 +105,11 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
 ; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 [[N_MOD_VF]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 252, [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 4
 ; CHECK-NEXT: [[IND_END:%.*]] = mul i32 [[N_VEC]], 4
 ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 ; CHECK-NEXT: [[TMP11:%.*]] = mul <vscale x 4 x i32> [[TMP9]], splat (i32 4)
 ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP11]]
-; CHECK-NEXT: [[TMP14:%.*]] = mul i32 4, [[TMP8]]
+; CHECK-NEXT: [[TMP14:%.*]] = mul i32 4, [[TMP4]]
 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP14]], i64 0
 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -126,11 +118,11 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i32> [[VEC_IND]] to <vscale x 4 x i64>
 ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], <vscale x 4 x i64> [[TMP15]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> zeroinitializer, <vscale x 4 x ptr> [[TMP16]], i32 1, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META4:![0-9]+]], !noalias [[META7:![0-9]+]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> zeroinitializer, <vscale x 4 x ptr> [[TMP16]], i32 1, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META5:![0-9]+]], !noalias [[META8:![0-9]+]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
 ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT: br label %[[SCALAR_PH]]
 ; CHECK: [[SCALAR_PH]]:
@@ -145,7 +137,7 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
 ; CHECK-NEXT: store i8 0, ptr [[GEP_DST]], align 1
 ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 4
 ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV]], 1001
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP11:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: [[R:%.*]] = phi i8 [ [[L]], %[[LOOP]] ]
 ; CHECK-NEXT: ret i8 [[R]]
@@ -181,7 +173,7 @@ define i32 @cost_of_exit_branch_and_cond_insts(ptr %a, ptr %b, i1 %c, i16 %x) #0
 ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 770, [[UMAX3]]
 ; CHECK-NEXT: [[SMAX4:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0)
 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i32 [[SMAX4]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP2]], 24
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP2]], 19
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; CHECK: [[VECTOR_MEMCHECK]]:
 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1
@@ -210,48 +202,48 @@ define i32 @cost_of_exit_branch_and_cond_insts(ptr %a, ptr %b, i1 %c, i16 %x) #0
 ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[B]], i32 [[INDEX]]
 ; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
 ; CHECK: [[PRED_STORE_IF]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11:![0-9]+]], !noalias [[META14:![0-9]+]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12:![0-9]+]], !noalias [[META15:![0-9]+]]
 ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]]
 ; CHECK: [[PRED_STORE_CONTINUE]]:
 ; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]]
 ; CHECK: [[PRED_STORE_IF5]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
 ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE6]]
 ; CHECK: [[PRED_STORE_CONTINUE6]]:
 ; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
 ; CHECK: [[PRED_STORE_IF7]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
 ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE8]]
 ; CHECK: [[PRED_STORE_CONTINUE8]]:
 ; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]]
 ; CHECK: [[PRED_STORE_IF9]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
 ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE10]]
 ; CHECK: [[PRED_STORE_CONTINUE10]]:
 ; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12:.*]]
 ; CHECK: [[PRED_STORE_IF11]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
 ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE12]]
 ; CHECK: [[PRED_STORE_CONTINUE12]]:
 ; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF13:.*]], label %[[PRED_STORE_CONTINUE14:.*]]
 ; CHECK: [[PRED_STORE_IF13]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
 ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE14]]
 ; CHECK: [[PRED_STORE_CONTINUE14]]:
 ; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF15:.*]], label %[[PRED_STORE_CONTINUE16:.*]]
 ; CHECK: [[PRED_STORE_IF15]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
 ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE16]]
 ; CHECK: [[PRED_STORE_CONTINUE16]]:
 ; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF17:.*]], label %[[PRED_STORE_CONTINUE18]]
 ; CHECK: [[PRED_STORE_IF17]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
 ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE18]]
 ; CHECK: [[PRED_STORE_CONTINUE18]]:
-; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> zeroinitializer, ptr [[TMP11]], i32 4, <8 x i1> [[BROADCAST_SPLAT]]), !alias.scope [[META14]]
+; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> zeroinitializer, ptr [[TMP11]], i32 4, <8 x i1> [[BROADCAST_SPLAT]]), !alias.scope [[META15]]
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
 ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT: br label %[[SCALAR_PH]]
 ; CHECK: [[SCALAR_PH]]:
@@ -273,7 +265,7 @@ define i32 @cost_of_exit_branch_and_cond_insts(ptr %a, ptr %b, i1 %c, i16 %x) #0
 ; CHECK-NEXT: [[EC:%.*]] = icmp slt i32 [[IV]], [[SUB]]
 ; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_LATCH]], label %[[EXIT:.*]]
 ; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br label %[[LOOP_HEADER]], !llvm.loop [[LOOP18:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: br label %[[RETURN:.*]]
 ; CHECK: [[RETURN]]:
@@ -315,45 +307,49 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
 ; CHECK-LABEL: define void @test_phi_in_latch_redundant(
 ; CHECK-SAME: ptr [[DST:%.*]], i32 [[A:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 37, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 37, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 37, [[N_MOD_VF]]
 ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[A]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 9
-; CHECK-NEXT: [[TMP10:%.*]] = xor <vscale x 2 x i32> [[BROADCAST_SPLAT]], splat (i32 -1)
-; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 9)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP8]]
-; CHECK-NEXT: [[TMP9:%.*]] = mul i64 9, [[TMP5]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP9]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = xor <vscale x 4 x i32> [[BROADCAST_SPLAT]], splat (i32 -1)
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 9)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]]
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 2 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> [[TMP10]], <vscale x 2 x ptr> [[TMP11]], i32 4, <vscale x 2 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 37, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP8]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 9, [[TMP5]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 4 x i32> [[TMP11]], [[BROADCAST_SPLAT4]]
+; CHECK-NEXT: [[TMP13:%.*]] = select <vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = select <vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = or <vscale x 4 x i1> [[TMP13]], [[TMP14]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> [[TMP19]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[PREDPHI]], <vscale x 4 x ptr> align 4 [[TMP16]], <vscale x 4 x i1> [[TMP15]], i32 [[TMP8]])
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP17]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 37
+; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 37, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
 ; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
 ; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
 ; CHECK-NEXT: br i1 false, label %[[LOOP_LATCH]], label %[[THEN:.*]]
 ; CHECK: [[THEN]]:
 ; CHECK-NEXT: [[NOT_A:%.*]] = xor i32 [[A]], -1
@@ -364,7 +360,7 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
 ; CHECK-NEXT: store i32 [[P]], ptr [[GEP]], align 4
 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 9
 ; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], 322
-; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP20:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -400,49 +396,50 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s
 ; CHECK-NEXT: [[TMP0:%.*]] = add nuw i64 [[SMAX]], 1
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1
 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
 ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 2
 ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT: [[TMP11:%.*]] = mul <vscale x 4 x i64> [[TMP9]], splat (i64 2)
 ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP11]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP8]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i8>, ptr [[TMP15]], align 1
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.vector.deinterleave2.nxv8i8(<vscale x 8 x i8> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP10]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP16]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP14:%.*]] = icmp ult <vscale x 4 x i32> [[TMP13]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 2
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP22]], i32 1, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i8> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.vector.deinterleave2.nxv8i8(<vscale x 8 x i8> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP23:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[STRIDED_VEC]], 0
 ; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[STRIDED_VEC]], 1
 ; CHECK-NEXT: [[TMP18:%.*]] = zext <vscale x 4 x i8> [[TMP17]] to <vscale x 4 x i32>
 ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP18]], <vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP18]], <vscale x 4 x ptr> align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
 ; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP13]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP:.*]]
 ; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
 ; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]]
 ; CHECK-NEXT: [[L_DEAD:%.*]] = load i8, ptr [[GEP_SRC_0]], align 1
 ; CHECK-NEXT: [[IV_1:%.*]] = add i64 [[IV]], 1
@@ -453,7 +450,7 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s
 ; CHECK-NEXT: store i32 [[EXT]], ptr [[GEP_DST]], align 4
 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 2
 ; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP22:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -481,26 +478,27 @@ exit:
 attributes #0 = { "target-features"="+64bit,+v" }
 ;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
 ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; CHECK: [[META4]] = !{[[META5:![0-9]+]]}
-; CHECK: [[META5]] = distinct !{[[META5]], [[META6:![0-9]+]]}
-; CHECK: [[META6]] = distinct !{[[META6]], !"LVerDomain"}
-; CHECK: [[META7]] = !{[[META8:![0-9]+]]}
-; CHECK: [[META8]] = distinct !{[[META8]], [[META6]]}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]]}
-; CHECK: [[META11]] = !{[[META12:![0-9]+]]}
-; CHECK: [[META12]] = distinct !{[[META12]], [[META13:![0-9]+]]}
-; CHECK: [[META13]] = distinct !{[[META13]], !"LVerDomain"}
-; CHECK: [[META14]] = !{[[META15:![0-9]+]]}
-; CHECK: [[META15]] = distinct !{[[META15]], [[META13]]}
-; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
-; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]]}
-; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]}
-; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META2]], [[META1]]}
-; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]], [[META2]]}
-; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META2]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
+; CHECK: [[META5]] = !{[[META6:![0-9]+]]}
+; CHECK: [[META6]] = distinct !{[[META6]], [[META7:![0-9]+]]}
+; CHECK: [[META7]] = distinct !{[[META7]], !"LVerDomain"}
+; CHECK: [[META8]] = !{[[META9:![0-9]+]]}
+; CHECK: [[META9]] = distinct !{[[META9]], [[META7]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META3]]}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
+; CHECK: [[META12]] = !{[[META13:![0-9]+]]}
+; CHECK: [[META13]] = distinct !{[[META13]], [[META14:![0-9]+]]}
+; CHECK: [[META14]] = distinct !{[[META14]], !"LVerDomain"}
+; CHECK: [[META15]] = !{[[META16:![0-9]+]]}
+; CHECK: [[META16]] = distinct !{[[META16]], [[META14]]}
+; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META3]]}
+; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]]}
+; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META3]], [[META1]]}
+; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP22]] = distinct !{[[LOOP22]], [[META3]], [[META1]]}
 ;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll index db3215a6..2a30724 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll @@ -13,15 +13,8 @@ target triple = "riscv64" define void @vector_add(ptr noalias nocapture %a, i64 %v) { ; CHECK-LABEL: @vector_add( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0 @@ -29,28 +22,31 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]) ; CHECK-NEXT: [[TMP9:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]) +; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ 
[[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] ; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -74,44 +70,41 @@ for.end: define i64 @vector_add_reduce(ptr noalias nocapture %a) { ; CHECK-LABEL: @vector_add_reduce( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 -; CHECK-NEXT: [[TMP9]] = add <vscale x 2 x i64> [[VEC_PHI]], [[WIDE_LOAD]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]]) +; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[VEC_PHI]], [[VP_OP_LOAD]] +; CHECK-NEXT: [[TMP9]] = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[VEC_PHI]], i32 [[TMP8]]) +; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP8]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP9]]) -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; 
CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 0, [[ENTRY]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]] ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll index f02e5de..d86eb91 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll @@ -10,15 +10,8 @@ target triple = "riscv64" define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-LABEL: @vector_udiv( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0 @@ -26,28 +19,31 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> 
splat (i1 true), i32 [[TMP10]]) ; CHECK-NEXT: [[TMP9:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]) +; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]] ; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -61,14 +57,10 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED: vector.body: ; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4 -; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 -; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8 -; FIXED-NEXT: [[TMP4:%.*]] = udiv <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 ; FIXED-NEXT: [[TMP5:%.*]] = udiv <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] -; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8 -; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8 -; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8 +; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; FIXED: middle.block: @@ -108,15 +100,8 @@ for.end: define void @vector_sdiv(ptr noalias 
nocapture %a, i64 %v, i64 %n) { ; CHECK-LABEL: @vector_sdiv( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0 @@ -124,28 +109,31 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]) ; CHECK-NEXT: [[TMP9:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]) +; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]] ; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 
[[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -159,14 +147,10 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED: vector.body: ; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4 -; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 -; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8 -; FIXED-NEXT: [[TMP4:%.*]] = sdiv <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 ; FIXED-NEXT: [[TMP5:%.*]] = sdiv <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] -; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8 -; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8 -; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8 +; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; FIXED: middle.block: @@ -206,15 +190,8 @@ for.end: define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-LABEL: @vector_urem( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0 @@ -222,28 +199,31 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]) ; CHECK-NEXT: [[TMP9:%.*]] = urem <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 
[[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]) +; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[DIVREM:%.*]] = urem i64 [[ELEM]], [[V]] ; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -257,14 +237,10 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED: vector.body: ; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4 -; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 -; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8 -; FIXED-NEXT: [[TMP4:%.*]] = urem <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 ; FIXED-NEXT: [[TMP5:%.*]] = urem <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] -; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8 -; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8 -; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8 +; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; FIXED: middle.block: @@ -304,15 +280,8 @@ for.end: define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-LABEL: @vector_srem( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] 
= icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0 @@ -320,28 +289,31 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]) ; CHECK-NEXT: [[TMP9:%.*]] = srem <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]) +; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[DIVREM:%.*]] = srem i64 [[ELEM]], [[V]] ; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label 
[[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -355,14 +327,10 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED: vector.body: ; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4 -; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 -; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8 -; FIXED-NEXT: [[TMP4:%.*]] = srem <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 ; FIXED-NEXT: [[TMP5:%.*]] = srem <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] -; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8 -; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8 -; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8 +; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; FIXED: middle.block: @@ -402,40 +370,41 @@ for.end: define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-LABEL: @predicated_udiv( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <vscale x 2 x i64> [[BROADCAST_SPLAT]], zeroinitializer -; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1) ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP12]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP7]], [[BROADCAST_SPLAT2]] ; 
CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP8]], align 8 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]) +; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i1> zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1) ; CHECK-NEXT: [[TMP11:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], [[TMP10]] -; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]] -; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP8]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]) +; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 @@ -448,7 +417,7 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -464,16 +433,11 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED: vector.body: ; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; FIXED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; FIXED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4 -; FIXED-NEXT: 
[[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8 -; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8 -; FIXED-NEXT: [[TMP7:%.*]] = udiv <4 x i64> [[WIDE_LOAD]], [[TMP5]] +; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8 ; FIXED-NEXT: [[TMP8:%.*]] = udiv <4 x i64> [[WIDE_LOAD1]], [[TMP5]] -; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD]] ; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP8]], <4 x i64> [[WIDE_LOAD1]] -; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP2]], align 8 -; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP4]], align 8 -; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP2]], align 8 +; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; FIXED: middle.block: @@ -525,40 +489,41 @@ for.end: define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-LABEL: @predicated_sdiv( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <vscale x 2 x i64> [[BROADCAST_SPLAT]], zeroinitializer -; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1) ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP12]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP7]], [[BROADCAST_SPLAT2]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP8]], align 8 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 
true), i32 [[TMP12]]) +; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i1> zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1) ; CHECK-NEXT: [[TMP11:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], [[TMP10]] -; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]] -; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP8]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]) +; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 @@ -571,7 +536,7 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -587,16 +552,11 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED: vector.body: ; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; FIXED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; FIXED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4 -; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8 -; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8 -; FIXED-NEXT: [[TMP7:%.*]] = sdiv <4 x i64> [[WIDE_LOAD]], [[TMP5]] +; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8 ; FIXED-NEXT: [[TMP8:%.*]] = sdiv <4 x 
i64> [[WIDE_LOAD1]], [[TMP5]] -; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD]] ; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP8]], <4 x i64> [[WIDE_LOAD1]] -; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP2]], align 8 -; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP4]], align 8 -; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP2]], align 8 +; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; FIXED: middle.block: @@ -648,37 +608,38 @@ for.end: define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; CHECK-LABEL: @predicated_udiv_by_constant( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP14]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 -; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42) +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]]) +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42) ; CHECK-NEXT: [[TMP10:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 27) -; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP9]], <vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[WIDE_LOAD]] -; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP7]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: [[TMP11:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale 
x 2 x i1> [[TMP9]], <vscale x 2 x i1> zeroinitializer +; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]], <vscale x 2 x i64> [[TMP10]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]]) +; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP14]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42 @@ -691,7 +652,7 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -703,18 +664,12 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; FIXED: vector.body: ; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4 -; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 -; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8 -; FIXED-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD]], splat (i64 42) +; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 ; FIXED-NEXT: [[TMP5:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD1]], splat (i64 42) -; FIXED-NEXT: [[TMP6:%.*]] = udiv <4 x i64> [[WIDE_LOAD]], splat (i64 27) ; FIXED-NEXT: [[TMP7:%.*]] = udiv <4 x i64> [[WIDE_LOAD1]], splat (i64 27) -; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP4]], <4 x i64> [[TMP6]], <4 x i64> [[WIDE_LOAD]] ; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP5]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD1]] -; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP1]], align 8 -; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP3]], align 8 -; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP1]], align 8 +; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; FIXED-NEXT: [[TMP8:%.*]] = icmp eq 
i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; FIXED: middle.block: @@ -766,37 +721,38 @@ for.end: define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; CHECK-LABEL: @predicated_sdiv_by_constant( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP14]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 -; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42) +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]]) +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42) ; CHECK-NEXT: [[TMP10:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 27) -; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP9]], <vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[WIDE_LOAD]] -; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP7]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: [[TMP11:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP9]], <vscale x 2 x i1> zeroinitializer +; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]], <vscale x 2 x i64> [[TMP10]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]]) +; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP14]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42 @@ -809,7 +765,7 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -821,18 +777,12 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; FIXED: vector.body: ; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4 -; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 -; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8 -; FIXED-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD]], splat (i64 42) +; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 ; FIXED-NEXT: [[TMP5:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD1]], splat (i64 42) -; FIXED-NEXT: [[TMP6:%.*]] = sdiv <4 x i64> [[WIDE_LOAD]], splat (i64 27) ; FIXED-NEXT: [[TMP7:%.*]] = sdiv <4 x i64> [[WIDE_LOAD1]], splat (i64 27) -; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP4]], <4 x i64> [[TMP6]], <4 x i64> [[WIDE_LOAD]] ; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP5]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD1]] -; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP1]], align 8 -; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP3]], align 8 -; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP1]], align 8 +; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; FIXED: middle.block: @@ -884,38 +834,39 @@ for.end: define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) { ; CHECK-LABEL: @predicated_sdiv_by_minus_one( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label 
[[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP12]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32() +; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 16 x i32> [[TMP6]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP7]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP12]]) ; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 -128) -; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i8> splat (i8 1) +; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 16 x i1> [[TMP15]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 16 x i1> [[TMP16]], <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i8> splat (i8 1) ; CHECK-NEXT: [[TMP11:%.*]] = sdiv <vscale x 16 x i8> [[WIDE_LOAD]], [[TMP10]] -; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[WIDE_LOAD]] -; CHECK-NEXT: store <vscale x 16 x i8> [[PREDPHI]], ptr [[TMP7]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 16 x i1> [[TMP16]], <vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[WIDE_LOAD]] +; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[PREDPHI]], ptr align 1 [[TMP7]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP12]]) +; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], 
[[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[ELEM]], -128 @@ -928,7 +879,7 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: store i8 [[PHI]], ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -940,20 +891,13 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) { ; FIXED: vector.body: ; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]] -; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 32 -; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[TMP1]], align 1 -; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <32 x i8>, ptr [[TMP3]], align 1 -; FIXED-NEXT: [[TMP4:%.*]] = icmp ne <32 x i8> [[WIDE_LOAD]], splat (i8 -128) +; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <32 x i8>, ptr [[TMP1]], align 1 ; FIXED-NEXT: [[TMP5:%.*]] = icmp ne <32 x i8> [[WIDE_LOAD1]], splat (i8 -128) -; FIXED-NEXT: [[TMP6:%.*]] = select <32 x i1> [[TMP4]], <32 x i8> splat (i8 -1), <32 x i8> splat (i8 1) ; FIXED-NEXT: [[TMP7:%.*]] = select <32 x i1> [[TMP5]], <32 x i8> splat (i8 -1), <32 x i8> splat (i8 1) -; FIXED-NEXT: [[TMP8:%.*]] = sdiv <32 x i8> [[WIDE_LOAD]], [[TMP6]] ; FIXED-NEXT: [[TMP9:%.*]] = sdiv <32 x i8> [[WIDE_LOAD1]], [[TMP7]] -; FIXED-NEXT: [[PREDPHI:%.*]] = select <32 x i1> [[TMP4]], <32 x i8> [[TMP8]], <32 x i8> [[WIDE_LOAD]] ; FIXED-NEXT: [[PREDPHI2:%.*]] = select <32 x i1> [[TMP5]], <32 x i8> [[TMP9]], <32 x i8> [[WIDE_LOAD1]] -; FIXED-NEXT: store <32 x i8> [[PREDPHI]], ptr [[TMP1]], align 1 -; FIXED-NEXT: store <32 x i8> [[PREDPHI2]], ptr [[TMP3]], align 1 -; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64 +; FIXED-NEXT: store <32 x i8> [[PREDPHI2]], ptr [[TMP1]], align 1 +; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; FIXED: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll index 2b93668..ea8af94 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll @@ -10,12 +10,6 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 
[[TMP4]], 2 -; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() @@ -76,12 +70,6 @@ define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll index ba6adc3..1220459 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll @@ -26,37 +26,33 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { ; ZVFHMIN-LABEL: define void @fadd( ; ZVFHMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { ; ZVFHMIN-NEXT: [[ENTRY:.*]]: -; ZVFHMIN-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; ZVFHMIN-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8 -; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP8]] -; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; ZVFHMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; ZVFHMIN: [[VECTOR_PH]]: -; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8 -; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]] -; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; ZVFHMIN-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() ; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP12]], 8 ; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] ; ZVFHMIN: [[VECTOR_BODY]]: -; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) ; ZVFHMIN-NEXT: [[TMP1:%.*]] = getelementptr half, ptr [[A]], i64 [[INDEX]] ; ZVFHMIN-NEXT: [[TMP2:%.*]] = getelementptr half, ptr [[B]], i64 [[INDEX]] -; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP1]], align 2 -; ZVFHMIN-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x half>, ptr [[TMP2]], align 2 +; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]]) +; ZVFHMIN-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]]) ; ZVFHMIN-NEXT: 
[[TMP11:%.*]] = fadd <vscale x 8 x half> [[WIDE_LOAD]], [[WIDE_LOAD1]] -; ZVFHMIN-NEXT: store <vscale x 8 x half> [[TMP11]], ptr [[TMP1]], align 2 -; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; ZVFHMIN-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; ZVFHMIN-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; ZVFHMIN-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP11]], ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]]) +; ZVFHMIN-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64 +; ZVFHMIN-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[INDEX]] +; ZVFHMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; ZVFHMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] +; ZVFHMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; ZVFHMIN: [[MIDDLE_BLOCK]]: -; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; ZVFHMIN-NEXT: br label %[[EXIT:.*]] ; ZVFHMIN: [[SCALAR_PH]]: -; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; ZVFHMIN-NEXT: br label %[[LOOP:.*]] ; ZVFHMIN: [[LOOP]]: -; ZVFHMIN-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] +; ZVFHMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] ; ZVFHMIN-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]] ; ZVFHMIN-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]] ; ZVFHMIN-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2 @@ -65,7 +61,7 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { ; ZVFHMIN-NEXT: store half [[Z]], ptr [[A_GEP]], align 2 ; ZVFHMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1 ; ZVFHMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] -; ZVFHMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; ZVFHMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; ZVFHMIN: [[EXIT]]: ; ZVFHMIN-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll index ce58ae1..aca00a9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll @@ -15,7 +15,6 @@ define i64 @pr97452_scalable_vf1_for(ptr %src, ptr noalias %dst) #0 { ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 23, [[TMP1]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 23, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1 ; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 0, i32 [[TMP4]] @@ -28,7 +27,7 @@ define i64 @pr97452_scalable_vf1_for(ptr %src, ptr noalias %dst) #0 { ; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> [[VECTOR_RECUR]], <vscale x 1 x i64> [[WIDE_LOAD]], i32 -1) ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]] ; CHECK-NEXT: store <vscale x 1 x i64> [[TMP7]], ptr 
[[TMP8]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll b/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll index c9ba2af..713105f 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll @@ -12,7 +12,7 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 4 -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]]) +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]]) ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: @@ -31,8 +31,6 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP9]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP19]], 4 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -43,7 +41,7 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] ; CHECK-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP10]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -75,7 +73,7 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 ; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]]) ; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]] ; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; ZVFHMIN: [[VECTOR_MEMCHECK]]: @@ -94,8 +92,6 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 ; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] ; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] -; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 
@llvm.vscale.i64() -; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 ; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] ; ZVFHMIN: [[VECTOR_BODY]]: ; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -106,7 +102,7 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]]) ; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] ; ZVFHMIN-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP18]], align 4 -; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] ; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; ZVFHMIN: [[MIDDLE_BLOCK]]: @@ -161,7 +157,7 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 4 -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]]) +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]]) ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: @@ -180,8 +176,6 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP9]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP19]], 4 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -192,7 +186,7 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.maximumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] ; CHECK-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP10]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -224,7 +218,7 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 ; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]]) ; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]] ; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], 
label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; ZVFHMIN: [[VECTOR_MEMCHECK]]: @@ -243,8 +237,6 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 ; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] ; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] -; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() -; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 ; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] ; ZVFHMIN: [[VECTOR_BODY]]: ; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -255,7 +247,7 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.maximumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]]) ; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] ; ZVFHMIN-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP18]], align 4 -; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] ; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; ZVFHMIN: [[MIDDLE_BLOCK]]: @@ -310,7 +302,7 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 2 -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]]) +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]]) ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: @@ -329,8 +321,6 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP9]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP19]], 2 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -341,7 +331,7 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.minimumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] ; CHECK-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP10]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -373,7 +363,7 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = 
ptrtoint ptr [[OUTPUT]] to i64 ; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]]) ; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]] ; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; ZVFHMIN: [[VECTOR_MEMCHECK]]: @@ -392,8 +382,6 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2 ; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] ; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] -; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() -; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2 ; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] ; ZVFHMIN: [[VECTOR_BODY]]: ; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -404,7 +392,7 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.minimumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]]) ; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] ; ZVFHMIN-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP18]], align 8 -; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] ; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; ZVFHMIN: [[MIDDLE_BLOCK]]: @@ -459,7 +447,7 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 2 -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]]) +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]]) ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: @@ -478,8 +466,6 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP9]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP19]], 2 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -490,7 +476,7 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] ; CHECK-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP10]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 
[[TMP12]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -522,7 +508,7 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 ; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]]) ; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]] ; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; ZVFHMIN: [[VECTOR_MEMCHECK]]: @@ -541,8 +527,6 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2 ; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] ; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] -; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() -; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2 ; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] ; ZVFHMIN: [[VECTOR_BODY]]: ; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -553,7 +537,7 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]]) ; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] ; ZVFHMIN-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP18]], align 8 -; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] ; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; ZVFHMIN: [[MIDDLE_BLOCK]]: @@ -606,11 +590,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 ; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 ; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP6]], 8 -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]]) -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: ; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8 @@ -623,30 +603,29 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] ; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 
[[TMP9]], 8 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP2]], align 2 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]]) ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP4]], align 2 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]]) ; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.minimumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]]) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP7]], align 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] -; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP7]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]]) +; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP13]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] +; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096 +; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] @@ -659,7 +638,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], 
!llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -669,11 +648,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 ; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 ; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 -; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 -; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) -; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]] -; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; ZVFHMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; ZVFHMIN: [[VECTOR_MEMCHECK]]: ; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 @@ -686,30 +661,29 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] ; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; ZVFHMIN: [[VECTOR_PH]]: -; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8 -; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] -; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] ; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() ; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8 ; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] ; ZVFHMIN: [[VECTOR_BODY]]: -; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) ; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP13]], align 2 +; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]]) ; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP15]], align 2 +; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP15]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]]) ; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.minimumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]]) ; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; ZVFHMIN-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP18]], align 2 -; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] -; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; ZVFHMIN-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP18]], <vscale x 8 x i1> splat (i1 
true), i32 [[TMP19]]) +; ZVFHMIN-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64 +; ZVFHMIN-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]] +; ZVFHMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096 ; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; ZVFHMIN: [[MIDDLE_BLOCK]]: -; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] -; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; ZVFHMIN-NEXT: br label %[[EXIT:.*]] ; ZVFHMIN: [[SCALAR_PH]]: -; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]] ; ZVFHMIN: [[FOR_BODY]]: ; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] @@ -722,7 +696,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2 ; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096 -; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; ZVFHMIN: [[EXIT]]: ; ZVFHMIN-NEXT: ret void ; @@ -755,11 +729,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 ; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 ; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP6]], 8 -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]]) -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: ; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8 @@ -772,30 +742,29 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] ; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP13:%.*]] = call i32 
@llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP2]], align 2 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]]) ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP4]], align 2 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]]) ; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]]) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP7]], align 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] -; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP7]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]]) +; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP13]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] +; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096 +; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] @@ -808,7 +777,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -818,11 +787,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64 ; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64 ; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64 -; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 -; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 
[[TMP1]]) -; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]] -; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; ZVFHMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; ZVFHMIN: [[VECTOR_MEMCHECK]]: ; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 @@ -835,30 +800,29 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] ; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; ZVFHMIN: [[VECTOR_PH]]: -; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8 -; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]] -; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]] ; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() ; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8 ; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]] ; ZVFHMIN: [[VECTOR_BODY]]: -; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; ZVFHMIN-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) ; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP13]], align 2 +; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]]) ; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP15]], align 2 +; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP15]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]]) ; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]]) ; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; ZVFHMIN-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP18]], align 2 -; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] -; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; ZVFHMIN-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP18]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]]) +; ZVFHMIN-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64 +; ZVFHMIN-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]] +; ZVFHMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096 +; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; ZVFHMIN: [[MIDDLE_BLOCK]]: -; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]] -; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], 
label %[[SCALAR_PH]] +; ZVFHMIN-NEXT: br label %[[EXIT:.*]] ; ZVFHMIN: [[SCALAR_PH]]: -; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]] ; ZVFHMIN: [[FOR_BODY]]: ; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] @@ -871,7 +835,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; ZVFHMIN-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2 ; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096 -; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; ZVFHMIN: [[EXIT]]: ; ZVFHMIN-NEXT: ret void ; @@ -907,10 +871,11 @@ declare half @llvm.maximumnum.f16(half, half) ; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]} ; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} ; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]} -; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} -; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]} -; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} -; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]} +; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META11:![0-9]+]], [[META2]]} +; CHECK: [[META11]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} +; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]]} +; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META11]], [[META2]]} +; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]]} ;. ; ZVFHMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; ZVFHMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} @@ -922,8 +887,9 @@ declare half @llvm.maximumnum.f16(half, half) ; ZVFHMIN: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]} ; ZVFHMIN: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} ; ZVFHMIN: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]} -; ZVFHMIN: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} -; ZVFHMIN: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]} -; ZVFHMIN: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} -; ZVFHMIN: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]} +; ZVFHMIN: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META11:![0-9]+]], [[META2]]} +; ZVFHMIN: [[META11]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} +; ZVFHMIN: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]]} +; ZVFHMIN: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META11]], [[META2]]} +; ZVFHMIN: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll b/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll index 1ea70b6d..ae18c63 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll @@ -3,8 +3,8 @@ ; CHECK: LV: Loop hints: force=enabled ; CHECK: LV: Scalar loop costs: 4. -; ChosenFactor.Cost is 4, but the real cost will be divided by the width, which is 2. -; CHECK: Cost for VF 2: 4 (Estimated cost per lane: 2.0) +; ChosenFactor.Cost is 9, but the real cost is that total divided by the estimated width, giving a cost per lane of 2.2.
+; CHECK: Cost for VF vscale x 2: 9 (Estimated cost per lane: 2.2) ; Regardless of force vectorization or not, this loop will eventually be vectorized because of the cost model. ; Therefore, the following message does not need to be printed even if vectorization is explicitly forced in the metadata. ; CHECK-NOT: LV: Vectorization seems to be not beneficial, but was forced by a user. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll index e6825fa..60f7c24 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll @@ -20,11 +20,7 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 { ; CHECK-NEXT: [[TMP3:%.*]] = udiv i64 [[TMP2]], 3 ; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[UMIN21]], [[TMP3]] ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 1 -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8 -; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umax.i64(i64 128, i64 [[TMP7]]) -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP5]], [[TMP8]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: ; CHECK-NEXT: [[TMP31:%.*]] = shl nsw i64 [[X_I64]], 1 ; CHECK-NEXT: [[SCEVGEP9:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP31]] @@ -57,42 +53,36 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 { ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT19]] ; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP45]], 8 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP5]], [[TMP46]] -; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 -; CHECK-NEXT: [[TMP48:%.*]] = select i1 [[TMP47]], i64 [[TMP46]], i64 [[N_MOD_VF]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP5]], [[TMP48]] ; CHECK-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP52:%.*]] = mul nuw i64 [[TMP51]], 8 -; CHECK-NEXT: [[TMP49:%.*]] = mul i64 [[N_VEC]], 3 -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[X_I64]], [[TMP49]] -; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 -; CHECK-NEXT: [[TMP50:%.*]] = mul i32 [[DOTCAST]], 3 -; CHECK-NEXT: [[IND_END22:%.*]] = add i32 [[X_I32]], [[TMP50]] ; CHECK-NEXT: [[TMP53:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64() ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X_I64]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP55:%.*]] = mul <vscale x 8 x i64> [[TMP53]], splat (i64 3) ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> [[DOTSPLAT]], [[TMP55]] -; CHECK-NEXT: [[TMP58:%.*]] = mul i64 3, [[TMP52]] -; CHECK-NEXT: [[DOTSPLATINSERT24:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP58]], i64 0 -; CHECK-NEXT: [[DOTSPLAT25:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT24]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; 
CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP5]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP27:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) +; CHECK-NEXT: [[TMP28:%.*]] = zext i32 [[TMP27]] to i64 +; CHECK-NEXT: [[TMP58:%.*]] = mul i64 3, [[TMP28]] +; CHECK-NEXT: [[DOTSPLATINSERT24:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP58]], i64 0 +; CHECK-NEXT: [[DOTSPLAT25:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT24]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP59:%.*]] = getelementptr i16, ptr [[A]], <vscale x 8 x i64> [[VEC_IND]] -; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP59]], i32 2, <vscale x 8 x i1> splat (i1 true)), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP52]] +; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP59]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP27]]), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]] +; CHECK-NEXT: [[TMP47:%.*]] = zext i32 [[TMP27]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP47]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP47]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT25]] -; CHECK-NEXT: [[TMP60:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP60]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP5]] +; CHECK-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[SCALAR_PH]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[X_I64]], %[[ENTRY]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ] -; CHECK-NEXT: [[BC_RESUME_VAL13:%.*]] = phi i32 [ [[IND_END22]], %[[MIDDLE_BLOCK]] ], [ [[X_I32]], %[[ENTRY]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[X_I64]], %[[ENTRY]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL13:%.*]] = phi i32 [ [[X_I32]], %[[ENTRY]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] @@ -107,7 +97,7 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 { ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3 ; CHECK-NEXT: [[TMP64]] = trunc i64 [[IV_NEXT]] to i32 ; CHECK-NEXT: [[C:%.*]] = icmp slt i64 [[IV]], 99 -; CHECK-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -144,8 +134,9 @@ attributes #0 = { "target-features"="+64bit,+v,+zvl256b" } ; CHECK: [[META3]] = !{[[META4:![0-9]+]], [[META5:![0-9]+]]} ; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]} ; 
CHECK: [[META5]] = distinct !{[[META5]], [[META2]]} -; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META7:![0-9]+]], [[META8:![0-9]+]]} +; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META7:![0-9]+]], [[META8:![0-9]+]], [[META9:![0-9]+]]} ; CHECK: [[META7]] = !{!"llvm.loop.isvectorized", i32 1} -; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]]} +; CHECK: [[META8]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} +; CHECK: [[META9]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META7]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll index 2f92ab5..1b2ac80 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll @@ -23,8 +23,6 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; OUTLOOP-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 4 ; OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], [[TMP3]] ; OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]] -; OUTLOOP-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32() -; OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4 ; OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]] ; OUTLOOP: vector.body: ; OUTLOOP-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -33,7 +31,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; OUTLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i16>, ptr [[TMP7]], align 2 ; OUTLOOP-NEXT: [[TMP9:%.*]] = sext <vscale x 4 x i16> [[WIDE_LOAD]] to <vscale x 4 x i32> ; OUTLOOP-NEXT: [[TMP10]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP9]] -; OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]] +; OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] ; OUTLOOP-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; OUTLOOP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; OUTLOOP: middle.block: @@ -75,8 +73,6 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 8 ; INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], [[TMP3]] ; INLOOP-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]] -; INLOOP-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32() -; INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 8 ; INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] ; INLOOP: vector.body: ; INLOOP-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -86,7 +82,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; INLOOP-NEXT: [[TMP9:%.*]] = sext <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i32> ; INLOOP-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP9]]) ; INLOOP-NEXT: [[TMP11]] = add i32 [[VEC_PHI]], [[TMP10]] -; INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]] +; INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] ; INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; INLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; INLOOP: middle.block: @@ -120,12 +116,6 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; IF-EVL-OUTLOOP: for.body.preheader: ; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-OUTLOOP: vector.ph: -; 
IF-EVL-OUTLOOP-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() -; IF-EVL-OUTLOOP-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 4 -; IF-EVL-OUTLOOP-NEXT: [[TMP2:%.*]] = sub i32 [[TMP1]], 1 -; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], [[TMP2]] -; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]] -; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-OUTLOOP-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4 ; IF-EVL-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]] @@ -174,12 +164,6 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; IF-EVL-INLOOP: for.body.preheader: ; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-INLOOP: vector.ph: -; IF-EVL-INLOOP-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() -; IF-EVL-INLOOP-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 8 -; IF-EVL-INLOOP-NEXT: [[TMP2:%.*]] = sub i32 [[TMP1]], 1 -; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], [[TMP2]] -; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]] -; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-INLOOP-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-INLOOP-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 8 ; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] @@ -252,8 +236,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; OUTLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0 ; OUTLOOP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]] @@ -264,7 +246,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; OUTLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 ; OUTLOOP-NEXT: [[TMP9:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]] ; OUTLOOP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]] -; OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; OUTLOOP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; OUTLOOP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; OUTLOOP: middle.block: @@ -300,8 +282,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] ; INLOOP: vector.body: ; INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -310,7 +290,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; INLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 ; INLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) ; 
INLOOP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP9]], i32 [[VEC_PHI]]) -; INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; INLOOP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; INLOOP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; INLOOP: middle.block: @@ -338,12 +318,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: entry: ; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-OUTLOOP: vector.ph: -; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0 @@ -352,7 +326,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP: vector.body: ; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) @@ -389,19 +363,13 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: entry: ; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-INLOOP: vector.ph: -; IF-EVL-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-INLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL-INLOOP: vector.body: ; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; 
IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll index e226eea..8046394 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll @@ -6,41 +6,43 @@ define void @load_store_factor2_i32(ptr %p) { ; CHECK-LABEL: @load_store_factor2_i32( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]] -; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4 -; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]]) +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() +; CHECK-NEXT: [[TMP13:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP14:%.*]] = shl i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP14]] +; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]]) +; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison) +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]]) ; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0 ; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1 ; 
CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], splat (i32 1) ; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2) ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP10]], <vscale x 4 x i32> [[TMP11]]) -; CHECK-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv8i32.p0(<vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK1]]) +; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 ; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] ; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 @@ -53,7 +55,7 @@ define void @load_store_factor2_i32(ptr %p) { ; CHECK-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 ; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -102,41 +104,43 @@ define void @load_store_factor2_i32(ptr %p) { ; ; SCALABLE-LABEL: @load_store_factor2_i32( ; SCALABLE-NEXT: entry: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; SCALABLE: vector.ph: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]] ; SCALABLE: vector.body: ; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, 
[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1 -; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]] -; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4 -; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]]) +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0 +; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer +; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() +; SCALABLE-NEXT: [[TMP13:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; SCALABLE-NEXT: [[TMP14:%.*]] = shl i64 [[INDEX]], 1 +; SCALABLE-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP14]] +; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]]) +; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison) +; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]]) ; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0 ; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1 ; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], splat (i32 1) ; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2) ; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP10]], <vscale x 4 x i32> [[TMP11]]) -; SCALABLE-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 4 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]]) +; SCALABLE-NEXT: call void @llvm.masked.store.nxv8i32.p0(<vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK1]]) +; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64 +; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; SCALABLE-NEXT: br label [[EXIT:%.*]] ; SCALABLE: scalar.ph: -; SCALABLE-NEXT: 
[[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; SCALABLE-NEXT: br label [[LOOP:%.*]] ; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 ; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] ; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 @@ -149,7 +153,7 @@ define void @load_store_factor2_i32(ptr %p) { ; SCALABLE-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 ; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -180,41 +184,43 @@ exit: define void @load_store_factor2_i64(ptr %p) { ; CHECK-LABEL: @load_store_factor2_i64( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]] -; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8 -; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]]) -; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0 +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]] +; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]]) +; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> 
@llvm.masked.load.nxv4i64.p0(ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison) +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]]) +; CHECK-NEXT: [[TMP20:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0 ; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1 -; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1) +; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP20]], splat (i64 1) ; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2) -; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[TMP11]]) -; CHECK-NEXT: store <vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP13]], <vscale x 2 x i64> [[TMP11]]) +; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv4i64.p0(<vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK1]]) +; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 ; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] ; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 @@ -227,7 +233,7 @@ define void @load_store_factor2_i64(ptr %p) { ; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 ; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -276,41 +282,43 @@ define void @load_store_factor2_i64(ptr %p) { ; ; SCALABLE-LABEL: @load_store_factor2_i64( ; SCALABLE-NEXT: entry: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; SCALABLE-NEXT: 
[[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; SCALABLE: vector.ph: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]] ; SCALABLE: vector.body: ; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1 -; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]] -; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8 -; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]]) -; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0 +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0 +; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer +; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32() +; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; SCALABLE-NEXT: [[TMP8:%.*]] = shl i64 [[INDEX]], 1 +; SCALABLE-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]] +; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]]) +; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison) +; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]]) +; SCALABLE-NEXT: [[TMP20:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0 ; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1 -; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1) +; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP20]], splat (i64 1) ; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2) -; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[TMP11]]) -; SCALABLE-NEXT: store <vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; SCALABLE-NEXT: 
[[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP13]], <vscale x 2 x i64> [[TMP11]]) +; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]]) +; SCALABLE-NEXT: call void @llvm.masked.store.nxv4i64.p0(<vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK1]]) +; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64 +; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; SCALABLE-NEXT: br label [[EXIT:%.*]] ; SCALABLE: scalar.ph: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; SCALABLE-NEXT: br label [[LOOP:%.*]] ; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 ; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] ; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 @@ -323,7 +331,7 @@ define void @load_store_factor2_i64(ptr %p) { ; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 ; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] +; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -354,24 +362,24 @@ exit: define void @load_store_factor3_i32(ptr %p) { ; CHECK-LABEL: @load_store_factor3_i32( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3 -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 12 x i32>, ptr [[TMP1]], align 4 -; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_VEC]]) +; CHECK-NEXT: 
[[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() +; CHECK-NEXT: [[TMP14:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[INDEX]], 3 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP16]] +; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]]) +; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.masked.load.nxv12i32.p0(ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK]], <vscale x 12 x i32> poison) +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_MASKED_VEC]]) ; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0 ; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1 ; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 2 @@ -379,18 +387,20 @@ define void @load_store_factor3_i32(ptr %p) { ; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2) ; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[TMP10]], splat (i32 3) ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[TMP12]], <vscale x 4 x i32> [[TMP13]]) -; CHECK-NEXT: store <vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv12i32.p0(<vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK1]]) +; CHECK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]] +; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label 
[[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 ; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] ; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 @@ -408,7 +418,7 @@ define void @load_store_factor3_i32(ptr %p) { ; CHECK-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4 ; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -466,24 +476,24 @@ define void @load_store_factor3_i32(ptr %p) { ; ; SCALABLE-LABEL: @load_store_factor3_i32( ; SCALABLE-NEXT: entry: -; SCALABLE-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; SCALABLE: vector.ph: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]] ; SCALABLE: vector.body: ; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3 -; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP0]] -; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 12 x i32>, ptr [[TMP1]], align 4 -; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_VEC]]) +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0 +; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer +; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() +; SCALABLE-NEXT: [[TMP14:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; SCALABLE-NEXT: [[TMP16:%.*]] = mul i64 [[INDEX]], 3 +; SCALABLE-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP16]] +; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]]) +; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.masked.load.nxv12i32.p0(ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK]], <vscale x 12 x i32> poison) +; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x 
i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_MASKED_VEC]]) ; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0 ; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1 ; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 2 @@ -491,18 +501,20 @@ define void @load_store_factor3_i32(ptr %p) { ; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2) ; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[TMP10]], splat (i32 3) ; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[TMP12]], <vscale x 4 x i32> [[TMP13]]) -; SCALABLE-NEXT: store <vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 4 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]]) +; SCALABLE-NEXT: call void @llvm.masked.store.nxv12i32.p0(<vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK1]]) +; SCALABLE-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64 +; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]] +; SCALABLE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; SCALABLE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; SCALABLE-NEXT: br label [[EXIT:%.*]] ; SCALABLE: scalar.ph: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; SCALABLE-NEXT: br label [[LOOP:%.*]] ; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 ; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] ; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 @@ -520,7 +532,7 @@ define void @load_store_factor3_i32(ptr %p) { ; SCALABLE-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4 ; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -557,43 +569,45 @@ exit: define void @load_store_factor3_i64(ptr %p) { ; CHECK-LABEL: @load_store_factor3_i64( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 2 -; CHECK-NEXT: 
[[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3 -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8 -; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]]) -; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0 +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 3 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]] +; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]]) +; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison) +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]]) +; CHECK-NEXT: [[TMP23:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0 ; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1 ; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2 -; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1) +; CHECK-NEXT: [[TMP25:%.*]] = add <vscale x 2 x i64> [[TMP23]], splat (i64 1) ; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2) ; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 3) -; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]]) -; CHECK-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8 -; CHECK-NEXT: 
[[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP25]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]]) +; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]]) +; CHECK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]] +; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 ; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] ; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 @@ -611,7 +625,7 @@ define void @load_store_factor3_i64(ptr %p) { ; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 ; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -669,43 +683,45 @@ define void @load_store_factor3_i64(ptr %p) { ; ; SCALABLE-LABEL: @load_store_factor3_i64( ; SCALABLE-NEXT: entry: -; SCALABLE-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 2 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; SCALABLE: vector.ph: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]] ; SCALABLE: vector.body: ; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3 -; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]] -; 
SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; SCALABLE-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 3
+; SCALABLE-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP23:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP25:%.*]] = add <vscale x 2 x i64> [[TMP23]], splat (i64 1)
; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2)
; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 3)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]])
-; SCALABLE-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP25]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; SCALABLE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -723,7 +739,7 @@ define void @load_store_factor3_i64(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -760,45 +776,47 @@ exit:
define void @load_store_factor4(ptr %p) {
; CHECK-LABEL: @load_store_factor4(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP24:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2
; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 1)
+; CHECK-NEXT: [[TMP26:%.*]] = add <vscale x 2 x i64> [[TMP24]], splat (i64 1)
; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 2 x i64> [[TMP11]], splat (i64 2)
; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 2 x i64> [[TMP12]], splat (i64 3)
; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 2 x i64> [[TMP13]], splat (i64 4)
-; CHECK-NEXT: [[INTERLEAVED_VEC4:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP14]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
-; CHECK-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC4]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP26]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -821,7 +839,7 @@ define void @load_store_factor4(ptr %p) {
; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -886,45 +904,47 @@ define void @load_store_factor4(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor4(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 4
-; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 4
+; SCALABLE-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP24:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2
; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP26:%.*]] = add <vscale x 2 x i64> [[TMP24]], splat (i64 1)
; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 2 x i64> [[TMP11]], splat (i64 2)
; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 2 x i64> [[TMP12]], splat (i64 3)
; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 2 x i64> [[TMP13]], splat (i64 4)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC4:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP14]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
-; SCALABLE-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC4]], ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP26]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP22:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; SCALABLE-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -947,7 +967,7 @@ define void @load_store_factor4(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -990,44 +1010,48 @@ exit:
define void @load_store_factor5(ptr %p) {
; CHECK-LABEL: @load_store_factor5(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 5
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 5 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]])
-; CHECK-NEXT: store <vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP18]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 5
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.masked.load.nxv5i64.p0(ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK]], <vscale x 5 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv5i64.p0(<vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP25:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP25]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1055,7 +1079,7 @@ define void @load_store_factor5(ptr %p) {
; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1129,44 +1153,48 @@ define void @load_store_factor5(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor5(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 5
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 5 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; SCALABLE-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]])
-; SCALABLE-NEXT: store <vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; SCALABLE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP18:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP18]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 5
+; SCALABLE-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.masked.load.nxv5i64.p0(ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK]], <vscale x 5 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv5i64.p0(<vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP25:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP25]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
+; SCALABLE-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1194,7 +1222,7 @@ define void @load_store_factor5(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -1243,46 +1271,50 @@ exit:
define void @load_store_factor6(ptr %p) {
; CHECK-LABEL: @load_store_factor6(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 6
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]])
-; CHECK-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP20]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 6
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP28:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP28]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]]
+; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1315,7 +1347,7 @@ define void @load_store_factor6(ptr %p) {
; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1397,46 +1429,50 @@ define void @load_store_factor6(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor6(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 6
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; SCALABLE-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]])
-; SCALABLE-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP20:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP20]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 6
+; SCALABLE-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP28:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP28]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]]
+; SCALABLE-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1469,7 +1505,7 @@ define void @load_store_factor6(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -1524,48 +1560,52 @@ exit:
define void @load_store_factor7(ptr %p) {
; CHECK-LABEL: @load_store_factor7(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 7
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 7 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
-; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 7)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]])
-; CHECK-NEXT: store <vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP22]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 7
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.masked.load.nxv7i64.p0(ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK]], <vscale x 7 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
+; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7)
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv7i64.p0(<vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP31:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP31]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1603,7 +1643,7 @@ define void @load_store_factor7(ptr %p) {
; CHECK-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1694,48 +1734,52 @@ define void @load_store_factor7(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor7(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 7
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 7 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; SCALABLE-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
-; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 7)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]])
-; SCALABLE-NEXT: store <vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; SCALABLE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP22:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP22]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 7
+; SCALABLE-NEXT: [[TMP23:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.masked.load.nxv7i64.p0(ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK]], <vscale x 7 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; SCALABLE-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
+; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; SCALABLE-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; SCALABLE-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7)
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv7i64.p0(<vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP31:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP31]], [[INDEX]]
+; SCALABLE-NEXT:
[[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP31]] +; SCALABLE-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; SCALABLE-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; SCALABLE-NEXT: br label [[EXIT:%.*]] ; SCALABLE: scalar.ph: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; SCALABLE-NEXT: br label [[LOOP:%.*]] ; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7 ; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] ; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 @@ -1773,7 +1817,7 @@ define void @load_store_factor7(ptr %p) { ; SCALABLE-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 ; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP17:![0-9]+]] +; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP18:![0-9]+]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -1834,50 +1878,54 @@ exit: define void @load_store_factor8(ptr %p) { ; CHECK-LABEL: @load_store_factor8( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP0]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[INDEX]], 3 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP3]] -; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP4]], align 8 -; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]]) -; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1 -; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2 -; CHECK-NEXT: 
[[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3 -; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4 -; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5 -; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6 -; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7 -; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 1) -; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 2) -; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 3) -; CHECK-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 4) -; CHECK-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 5) -; CHECK-NEXT: [[TMP24:%.*]] = add <vscale x 1 x i64> [[TMP16]], splat (i64 6) -; CHECK-NEXT: [[TMP25:%.*]] = add <vscale x 1 x i64> [[TMP17]], splat (i64 7) -; CHECK-NEXT: [[TMP26:%.*]] = add <vscale x 1 x i64> [[TMP18]], splat (i64 8) -; CHECK-NEXT: [[INTERLEAVED_VEC12:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], <vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]], <vscale x 1 x i64> [[TMP24]], <vscale x 1 x i64> [[TMP25]], <vscale x 1 x i64> [[TMP26]]) -; CHECK-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC12]], ptr [[TMP4]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] -; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32() +; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP4]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 3 +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]] +; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]]) +; CHECK-NEXT: 
[[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison) +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]]) +; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0 +; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1 +; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3 +; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4 +; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5 +; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6 +; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7 +; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1) +; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2) +; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3) +; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4) +; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5) +; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6) +; CHECK-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7) +; CHECK-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 8) +; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], <vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]]) +; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]]) +; 
CHECK-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]]) +; CHECK-NEXT: [[TMP34:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP34]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP34]] +; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3 ; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] ; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 @@ -1920,7 +1968,7 @@ define void @load_store_factor8(ptr %p) { ; CHECK-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8 ; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -2017,50 +2065,54 @@ define void @load_store_factor8(ptr %p) { ; ; SCALABLE-LABEL: @load_store_factor8( ; SCALABLE-NEXT: entry: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP0]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; SCALABLE: vector.ph: -; SCALABLE-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]] ; SCALABLE: vector.body: ; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; SCALABLE-NEXT: [[TMP3:%.*]] = shl i64 [[INDEX]], 3 -; SCALABLE-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP3]] -; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP4]], align 8 -; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]]) -; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0 -; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 
x i64> } [[STRIDED_VEC]], 1 -; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2 -; SCALABLE-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3 -; SCALABLE-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4 -; SCALABLE-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5 -; SCALABLE-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6 -; SCALABLE-NEXT: [[TMP18:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7 -; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 1) -; SCALABLE-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 2) -; SCALABLE-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 3) -; SCALABLE-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 4) -; SCALABLE-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 5) -; SCALABLE-NEXT: [[TMP24:%.*]] = add <vscale x 1 x i64> [[TMP16]], splat (i64 6) -; SCALABLE-NEXT: [[TMP25:%.*]] = add <vscale x 1 x i64> [[TMP17]], splat (i64 7) -; SCALABLE-NEXT: [[TMP26:%.*]] = add <vscale x 1 x i64> [[TMP18]], splat (i64 8) -; SCALABLE-NEXT: [[INTERLEAVED_VEC12:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], <vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]], <vscale x 1 x i64> [[TMP24]], <vscale x 1 x i64> [[TMP25]], <vscale x 1 x i64> [[TMP26]]) -; SCALABLE-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC12]], ptr [[TMP4]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] -; SCALABLE-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true) +; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0 +; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer +; SCALABLE-NEXT: [[TMP4:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32() +; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP4]], [[BROADCAST_SPLAT]] +; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 3 +; SCALABLE-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]] +; 
SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]]) +; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison) +; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]]) +; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0 +; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1 +; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2 +; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3 +; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4 +; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5 +; SCALABLE-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6 +; SCALABLE-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7 +; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1) +; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2) +; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3) +; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4) +; SCALABLE-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5) +; SCALABLE-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6) +; SCALABLE-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7) +; SCALABLE-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 8) +; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], 
<vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]]) +; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]]) +; SCALABLE-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]]) +; SCALABLE-NEXT: [[TMP34:%.*]] = zext i32 [[TMP7]] to i64 +; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP34]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP34]] +; SCALABLE-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; SCALABLE-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; SCALABLE-NEXT: br label [[EXIT:%.*]] ; SCALABLE: scalar.ph: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; SCALABLE-NEXT: br label [[LOOP:%.*]] ; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3 ; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] ; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 @@ -2103,7 +2155,7 @@ define void @load_store_factor8(ptr %p) { ; SCALABLE-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8 ; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]] +; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP20:![0-9]+]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -2170,40 +2222,41 @@ exit: define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) { ; CHECK-LABEL: @combine_load_factor2_i32( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]] -; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4 -; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } 
@llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]]) +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() +; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP13]] +; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> [[TMP12]]) +; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison) +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]]) ; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0 ; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1 ; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[TMP9]] ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[Q:%.*]], i64 [[INDEX]] -; CHECK-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP11]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) +; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 ; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] ; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 @@ -2215,7 +2268,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) { ; CHECK-NEXT: store i32 [[RES]], ptr [[DST]], align 4 ; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; CHECK-NEXT: [[DONE:%.*]] = 
icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -2226,24 +2279,15 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) { ; FIXED-NEXT: br label [[VECTOR_BODY:%.*]] ; FIXED: vector.body: ; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; FIXED-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 8 -; FIXED-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 1 -; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[TMP0]], 1 -; FIXED-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP1]] -; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP2]] -; FIXED-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr [[TMP3]], align 4 -; FIXED-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> -; FIXED-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> +; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX]], 1 +; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP2]] ; FIXED-NEXT: [[WIDE_VEC2:%.*]] = load <16 x i32>, ptr [[TMP4]], align 4 ; FIXED-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <16 x i32> [[WIDE_VEC2]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> ; FIXED-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <16 x i32> [[WIDE_VEC2]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> -; FIXED-NEXT: [[TMP5:%.*]] = add <8 x i32> [[STRIDED_VEC]], [[STRIDED_VEC1]] ; FIXED-NEXT: [[TMP6:%.*]] = add <8 x i32> [[STRIDED_VEC3]], [[STRIDED_VEC4]] ; FIXED-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[Q:%.*]], i64 [[INDEX]] -; FIXED-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP7]], i32 8 -; FIXED-NEXT: store <8 x i32> [[TMP5]], ptr [[TMP7]], align 4 -; FIXED-NEXT: store <8 x i32> [[TMP6]], ptr [[TMP9]], align 4 -; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; FIXED-NEXT: store <8 x i32> [[TMP6]], ptr [[TMP7]], align 4 +; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; FIXED: middle.block: @@ -2270,40 +2314,41 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) { ; ; SCALABLE-LABEL: @combine_load_factor2_i32( ; SCALABLE-NEXT: entry: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; SCALABLE: vector.ph: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]] ; SCALABLE: vector.body: ; 
SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1 -; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]] -; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4 -; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]]) +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0 +; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer +; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() +; SCALABLE-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; SCALABLE-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1 +; SCALABLE-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP13]] +; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> [[TMP12]]) +; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison) +; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]]) ; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0 ; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1 ; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[TMP9]] ; SCALABLE-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[Q:%.*]], i64 [[INDEX]] -; SCALABLE-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP11]], align 4 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) +; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64 +; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; SCALABLE-NEXT: br label [[EXIT:%.*]] ; SCALABLE: scalar.ph: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; SCALABLE-NEXT: br label [[LOOP:%.*]] ; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ 
[[NEXTI:%.*]], [[LOOP]] ] +; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 ; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] ; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 @@ -2315,7 +2360,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) { ; SCALABLE-NEXT: store i32 [[RES]], ptr [[DST]], align 4 ; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]] +; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP22:![0-9]+]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -2347,40 +2392,41 @@ exit: define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) { ; CHECK-LABEL: @combine_load_factor2_i64( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]] -; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8 -; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]]) +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP13]] +; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP12]], <vscale x 2 x i1> [[TMP12]]) +; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr [[TMP15]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison) +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]]) ; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x 
i64> } [[STRIDED_VEC]], 0 ; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1 ; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], [[TMP9]] ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[Q:%.*]], i64 [[INDEX]] -; CHECK-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP10]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) +; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 ; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] ; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 @@ -2392,7 +2438,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) { ; CHECK-NEXT: store i64 [[RES]], ptr [[DST]], align 8 ; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -2403,24 +2449,15 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) { ; FIXED-NEXT: br label [[VECTOR_BODY:%.*]] ; FIXED: vector.body: ; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; FIXED-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 4 -; FIXED-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 1 -; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[TMP0]], 1 -; FIXED-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP1]] -; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] -; FIXED-NEXT: [[WIDE_VEC:%.*]] = load <8 x i64>, ptr [[TMP3]], align 8 -; FIXED-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i64> [[WIDE_VEC]], <8 x i64> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6> -; FIXED-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i64> [[WIDE_VEC]], <8 x i64> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7> +; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX]], 1 +; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP2]] ; FIXED-NEXT: [[WIDE_VEC2:%.*]] = load <8 x i64>, ptr [[TMP4]], align 8 ; FIXED-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <8 x i64> 
[[WIDE_VEC2]], <8 x i64> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6> ; FIXED-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <8 x i64> [[WIDE_VEC2]], <8 x i64> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7> -; FIXED-NEXT: [[TMP5:%.*]] = add <4 x i64> [[STRIDED_VEC]], [[STRIDED_VEC1]] ; FIXED-NEXT: [[TMP6:%.*]] = add <4 x i64> [[STRIDED_VEC3]], [[STRIDED_VEC4]] ; FIXED-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[Q:%.*]], i64 [[INDEX]] -; FIXED-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP7]], i32 4 -; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP7]], align 8 -; FIXED-NEXT: store <4 x i64> [[TMP6]], ptr [[TMP9]], align 8 -; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; FIXED-NEXT: store <4 x i64> [[TMP6]], ptr [[TMP7]], align 8 +; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; FIXED: middle.block: @@ -2447,40 +2484,41 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) { ; ; SCALABLE-LABEL: @combine_load_factor2_i64( ; SCALABLE-NEXT: entry: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; SCALABLE: vector.ph: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]] ; SCALABLE: vector.body: ; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1 -; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]] -; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8 -; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]]) +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0 +; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer +; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32() +; SCALABLE-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]] +; SCALABLE-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1 +; SCALABLE-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP13]] +; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP12]], <vscale x 2 x i1> [[TMP12]]) +; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr [[TMP15]], i32 8, 
<vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison) +; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]]) ; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0 ; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1 ; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], [[TMP9]] ; SCALABLE-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[Q:%.*]], i64 [[INDEX]] -; SCALABLE-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP10]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) +; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64 +; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; SCALABLE-NEXT: br label [[EXIT:%.*]] ; SCALABLE: scalar.ph: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; SCALABLE-NEXT: br label [[LOOP:%.*]] ; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 ; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] ; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 @@ -2492,7 +2530,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) { ; SCALABLE-NEXT: store i64 [[RES]], ptr [[DST]], align 8 ; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP23:![0-9]+]] +; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP24:![0-9]+]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll index 056dc7e..ee91f75 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll @@ -7,22 +7,31 @@ ; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf6-segment-load-store -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=OPT-NF6 ; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf7-segment-load-store -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=OPT-NF7 ; RUN: opt -passes=loop-vectorize -mtriple=riscv64 
-mattr=+v,+optimized-nf8-segment-load-store -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=OPT-NF8 +; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,-optimized-nf2-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-NO-OPT +; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF2 +; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf3-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF3 +; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf4-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF4 +; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf5-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF5 +; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf6-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF6 +; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf7-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF7 +; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf8-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF8 %i8.2 = type {i8, i8} define void @i8_factor_2(ptr %data, i64 %n) { entry: br label %for.body +; FIXED-OPT-NF2-LABEL: Checking a loop in 'i8_factor_2' +; FIXED-OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> +; FIXED-OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> +; FIXED-OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> +; FIXED-OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> +; FIXED-OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> +; FIXED-OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> +; FIXED-OPT-NF2: Cost of 4 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> +; FIXED-OPT-NF2: Cost of 4 for VF 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> +; FIXED-OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> +; FIXED-OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> ; OPT-NF2-LABEL: Checking a loop in 'i8_factor_2' -; OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> -; OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> -; OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> -; OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> -; OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> -; OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> -; OPT-NF2: Cost of 4 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> -; OPT-NF2: Cost of 4 for VF 16: 
INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> -; OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> -; OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> ; OPT-NF2: Cost of 3 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> ; OPT-NF2: Cost of 3 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> ; OPT-NF2: Cost of 3 for VF vscale x 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> @@ -33,17 +42,18 @@ entry: ; OPT-NF2: Cost of 4 for VF vscale x 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> ; OPT-NF2: Cost of 8 for VF vscale x 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> ; OPT-NF2: Cost of 8 for VF vscale x 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> +; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_2' +; FIXED-NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_2' -; NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> -; NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> -; NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> -; NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> -; NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> -; NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> -; NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> -; NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> -; NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> -; NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> ; NO-OPT: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> ; NO-OPT: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0> ; NO-OPT: Cost of 8 for VF vscale x 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0> @@ -76,17 +86,18 @@ for.end: define void @i8_factor_3(ptr %data, i64 %n) { entry: br label %for.body +; FIXED-OPT-NF3-LABEL: Checking a loop in 'i8_factor_3' +; FIXED-OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; FIXED-OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; FIXED-OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; FIXED-OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; FIXED-OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; FIXED-OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; FIXED-OPT-NF3: Cost of 7 
for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; FIXED-OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; FIXED-OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; FIXED-OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> ; OPT-NF3-LABEL: Checking a loop in 'i8_factor_3' -; OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> -; OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> -; OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> -; OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> -; OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> -; OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> -; OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> -; OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> -; OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> -; OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> ; OPT-NF3: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> ; OPT-NF3: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> ; OPT-NF3: Cost of 4 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> @@ -97,17 +108,18 @@ entry: ; OPT-NF3: Cost of 7 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> ; OPT-NF3: Cost of 14 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> ; OPT-NF3: Cost of 14 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_3' +; FIXED-NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_3' -; NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> -; NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> -; NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> -; NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> -; NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> -; NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> -; NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> -; NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> -; NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> -; NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at 
<badref>, ir<%p0> ; NO-OPT: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> ; NO-OPT: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> ; NO-OPT: Cost of 12 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> @@ -144,17 +156,18 @@ for.end: define void @i8_factor_4(ptr %data, i64 %n) { entry: br label %for.body +; FIXED-OPT-NF4-LABEL: Checking a loop in 'i8_factor_4' +; FIXED-OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; FIXED-OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; FIXED-OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; FIXED-OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; FIXED-OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; FIXED-OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; FIXED-OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; FIXED-OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; FIXED-OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; FIXED-OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> ; OPT-NF4-LABEL: Checking a loop in 'i8_factor_4' -; OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> -; OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> -; OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> -; OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> -; OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> -; OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> -; OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> -; OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> -; OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> -; OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> ; OPT-NF4: Cost of 5 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> ; OPT-NF4: Cost of 5 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> ; OPT-NF4: Cost of 5 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> @@ -165,17 +178,18 @@ entry: ; OPT-NF4: Cost of 8 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> ; OPT-NF4: Cost of 16 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> ; OPT-NF4: Cost of 16 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_4' +; FIXED-NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 128 for 
VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_4' -; NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> -; NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> -; NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> -; NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> -; NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> -; NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> -; NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> -; NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> -; NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> -; NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> ; NO-OPT: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> ; NO-OPT: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> ; NO-OPT: Cost of 16 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> @@ -216,15 +230,16 @@ for.end: define void @i8_factor_5(ptr %data, i64 %n) { entry: br label %for.body +; FIXED-OPT-NF5-LABEL: Checking a loop in 'i8_factor_5' +; FIXED-OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; FIXED-OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; FIXED-OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; FIXED-OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; FIXED-OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; FIXED-OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; FIXED-OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; FIXED-OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> ; OPT-NF5-LABEL: Checking a loop in 'i8_factor_5' -; OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> -; OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> -; OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> -; OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> -; OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> -; OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> -; OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> -; OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> ; OPT-NF5: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> ; OPT-NF5: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> ; OPT-NF5: Cost of 7 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> @@ -233,15 +248,16 @@ entry: ; OPT-NF5: Cost of 9 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> ; OPT-NF5: Cost of 13 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> ; OPT-NF5: Cost of 13 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_5' +; FIXED-NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, 
ir<%p0> +; FIXED-NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_5' -; NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> -; NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> -; NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> -; NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> -; NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> -; NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> -; NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> -; NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> ; NO-OPT: Cost of 10 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> ; NO-OPT: Cost of 10 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> ; NO-OPT: Cost of 20 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> @@ -284,15 +300,16 @@ for.end: define void @i8_factor_6(ptr %data, i64 %n) { entry: br label %for.body +; FIXED-OPT-NF6-LABEL: Checking a loop in 'i8_factor_6' +; FIXED-OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; FIXED-OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; FIXED-OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; FIXED-OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; FIXED-OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; FIXED-OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; FIXED-OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; FIXED-OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> ; OPT-NF6-LABEL: Checking a loop in 'i8_factor_6' -; OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> -; OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> -; OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> -; OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> -; OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> -; OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> -; OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> -; OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> ; OPT-NF6: Cost of 7 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> ; OPT-NF6: Cost of 7 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> ; OPT-NF6: Cost of 8 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> @@ -301,15 +318,16 @@ entry: ; OPT-NF6: Cost of 10 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at <badref>, 
ir<%p0> ; OPT-NF6: Cost of 14 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> ; OPT-NF6: Cost of 14 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_6' +; FIXED-NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_6' -; NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> -; NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> -; NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> -; NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> -; NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> -; NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> -; NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> -; NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> ; NO-OPT: Cost of 12 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> ; NO-OPT: Cost of 12 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> ; NO-OPT: Cost of 24 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> @@ -356,15 +374,16 @@ for.end: define void @i8_factor_7(ptr %data, i64 %n) { entry: br label %for.body +; FIXED-OPT-NF7-LABEL: Checking a loop in 'i8_factor_7' +; FIXED-OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; FIXED-OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; FIXED-OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; FIXED-OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; FIXED-OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; FIXED-OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; FIXED-OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; FIXED-OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> ; OPT-NF7-LABEL: Checking a loop in 'i8_factor_7' -; OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> -; OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> -; OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> -; OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> -; OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> -; OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> -; OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> -; OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> ; OPT-NF7: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with 
factor 7 at %l0, ir<%p0> ; OPT-NF7: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> ; OPT-NF7: Cost of 9 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> @@ -373,15 +392,16 @@ entry: ; OPT-NF7: Cost of 11 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> ; OPT-NF7: Cost of 15 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> ; OPT-NF7: Cost of 15 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_7' +; FIXED-NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_7' -; NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> -; NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> -; NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> -; NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> -; NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> -; NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> -; NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> -; NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> ; NO-OPT: Cost of 14 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> ; NO-OPT: Cost of 14 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> ; NO-OPT: Cost of 28 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> @@ -432,15 +452,16 @@ for.end: define void @i8_factor_8(ptr %data, i64 %n) { entry: br label %for.body +; FIXED-OPT-NF8-LABEL: Checking a loop in 'i8_factor_8' +; FIXED-OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; FIXED-OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; FIXED-OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; FIXED-OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; FIXED-OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; FIXED-OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; FIXED-OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; FIXED-OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> ; OPT-NF8-LABEL: Checking a loop in 'i8_factor_8' -; OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> -; OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> -; OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> -; OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> -; OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at 
%l0, ir<%p0> -; OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> -; OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> -; OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> ; OPT-NF8: Cost of 9 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> ; OPT-NF8: Cost of 9 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> ; OPT-NF8: Cost of 10 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> @@ -449,15 +470,16 @@ entry: ; OPT-NF8: Cost of 12 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> ; OPT-NF8: Cost of 16 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> ; OPT-NF8: Cost of 16 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_8' +; FIXED-NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; FIXED-NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; FIXED-NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_8' -; NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> -; NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> -; NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> -; NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> -; NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> -; NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> -; NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> -; NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> ; NO-OPT: Cost of 16 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> ; NO-OPT: Cost of 16 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> ; NO-OPT: Cost of 32 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll index 976ce77..1bceb87 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll @@ -18,37 +18,35 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali ; SCALAR_EPILOGUE-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4 ; SCALAR_EPILOGUE-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]] ; SCALAR_EPILOGUE-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]] -; SCALAR_EPILOGUE-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() -; SCALAR_EPILOGUE-NEXT: [[TMP4:%.*]] = shl nuw i32 [[TMP3]], 4 ; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0 ; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> 
[[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer -; SCALAR_EPILOGUE-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32() -; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP4]], i64 0 +; SCALAR_EPILOGUE-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32() +; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0 ; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer ; SCALAR_EPILOGUE-NEXT: br label [[VECTOR_BODY:%.*]] ; SCALAR_EPILOGUE: vector.body: ; SCALAR_EPILOGUE-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; SCALAR_EPILOGUE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] -; SCALAR_EPILOGUE-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] -; SCALAR_EPILOGUE-NEXT: [[TMP7:%.*]] = shl i32 [[INDEX]], 1 -; SCALAR_EPILOGUE-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64 -; SCALAR_EPILOGUE-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP8]] -; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]]) -; SCALAR_EPILOGUE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP9]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison) +; SCALAR_EPILOGUE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; SCALAR_EPILOGUE-NEXT: [[TMP4:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] +; SCALAR_EPILOGUE-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 1 +; SCALAR_EPILOGUE-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64 +; SCALAR_EPILOGUE-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]] +; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]]) +; SCALAR_EPILOGUE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP7]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison) ; SCALAR_EPILOGUE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> [[WIDE_MASKED_VEC]]) -; SCALAR_EPILOGUE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0 -; SCALAR_EPILOGUE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1 -; SCALAR_EPILOGUE-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP11]]) -; SCALAR_EPILOGUE-NEXT: [[TMP13:%.*]] = sext i32 [[TMP7]] to i64 -; SCALAR_EPILOGUE-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP13]] -; SCALAR_EPILOGUE-NEXT: [[TMP15:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP12]] -; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP15]]) -; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> 
@llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]]) -; SCALAR_EPILOGUE-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]]) -; SCALAR_EPILOGUE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] +; SCALAR_EPILOGUE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0 +; SCALAR_EPILOGUE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1 +; SCALAR_EPILOGUE-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]]) +; SCALAR_EPILOGUE-NEXT: [[TMP11:%.*]] = sext i32 [[TMP5]] to i64 +; SCALAR_EPILOGUE-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP11]] +; SCALAR_EPILOGUE-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP10]] +; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP13]]) +; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]]) +; SCALAR_EPILOGUE-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP12]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]]) +; SCALAR_EPILOGUE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]] ; SCALAR_EPILOGUE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]] -; SCALAR_EPILOGUE-NEXT: [[TMP16:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; SCALAR_EPILOGUE-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; SCALAR_EPILOGUE-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; SCALAR_EPILOGUE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SCALAR_EPILOGUE: middle.block: ; SCALAR_EPILOGUE-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0 ; SCALAR_EPILOGUE-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] @@ -65,39 +63,37 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali ; PREDICATED_DATA-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023 ; PREDICATED_DATA-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]] ; PREDICATED_DATA-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] -; PREDICATED_DATA-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() -; PREDICATED_DATA-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4 ; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0 ; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer -; PREDICATED_DATA-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32() -; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP3]], i64 0 +; PREDICATED_DATA-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32() +; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0 ; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> 
zeroinitializer ; PREDICATED_DATA-NEXT: br label [[VECTOR_BODY:%.*]] ; PREDICATED_DATA: vector.body: ; PREDICATED_DATA-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; PREDICATED_DATA-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; PREDICATED_DATA-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; PREDICATED_DATA-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 1024) -; PREDICATED_DATA-NEXT: [[TMP5:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] -; PREDICATED_DATA-NEXT: [[TMP6:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1> zeroinitializer -; PREDICATED_DATA-NEXT: [[TMP7:%.*]] = shl i32 [[INDEX]], 1 -; PREDICATED_DATA-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64 -; PREDICATED_DATA-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP8]] -; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]]) -; PREDICATED_DATA-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP9]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison) +; PREDICATED_DATA-NEXT: [[TMP3:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] +; PREDICATED_DATA-NEXT: [[TMP4:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1> zeroinitializer +; PREDICATED_DATA-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 1 +; PREDICATED_DATA-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64 +; PREDICATED_DATA-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]] +; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]]) +; PREDICATED_DATA-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP7]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison) ; PREDICATED_DATA-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> [[WIDE_MASKED_VEC]]) -; PREDICATED_DATA-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0 -; PREDICATED_DATA-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1 -; PREDICATED_DATA-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP11]]) -; PREDICATED_DATA-NEXT: [[TMP13:%.*]] = sext i32 [[TMP7]] to i64 -; PREDICATED_DATA-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP13]] -; PREDICATED_DATA-NEXT: [[TMP15:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP12]] -; PREDICATED_DATA-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP15]]) -; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]]) -; PREDICATED_DATA-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 1, <vscale x 32 x i1> 
[[INTERLEAVED_MASK3]]) -; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] +; PREDICATED_DATA-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0 +; PREDICATED_DATA-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1 +; PREDICATED_DATA-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]]) +; PREDICATED_DATA-NEXT: [[TMP11:%.*]] = sext i32 [[TMP5]] to i64 +; PREDICATED_DATA-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP11]] +; PREDICATED_DATA-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP10]] +; PREDICATED_DATA-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP13]]) +; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]]) +; PREDICATED_DATA-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP12]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]]) +; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP1]] ; PREDICATED_DATA-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]] -; PREDICATED_DATA-NEXT: [[TMP16:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; PREDICATED_DATA-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; PREDICATED_DATA-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; PREDICATED_DATA-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; PREDICATED_DATA: middle.block: ; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]] ; PREDICATED_DATA: scalar.ph: @@ -197,41 +193,39 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali ; SCALAR_EPILOGUE-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4 ; SCALAR_EPILOGUE-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]] ; SCALAR_EPILOGUE-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]] -; SCALAR_EPILOGUE-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() -; SCALAR_EPILOGUE-NEXT: [[TMP4:%.*]] = shl nuw i32 [[TMP3]], 4 ; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0 ; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer -; SCALAR_EPILOGUE-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32() -; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP4]], i64 0 +; SCALAR_EPILOGUE-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32() +; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0 ; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer ; SCALAR_EPILOGUE-NEXT: br label [[VECTOR_BODY:%.*]] ; SCALAR_EPILOGUE: vector.body: ; SCALAR_EPILOGUE-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; SCALAR_EPILOGUE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], 
[[VECTOR_BODY]] ] -; SCALAR_EPILOGUE-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] -; SCALAR_EPILOGUE-NEXT: [[TMP7:%.*]] = shl i32 [[INDEX]], 2 -; SCALAR_EPILOGUE-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64 -; SCALAR_EPILOGUE-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP8]] -; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]]) -; SCALAR_EPILOGUE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP9]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison) +; SCALAR_EPILOGUE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; SCALAR_EPILOGUE-NEXT: [[TMP4:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] +; SCALAR_EPILOGUE-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 2 +; SCALAR_EPILOGUE-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64 +; SCALAR_EPILOGUE-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]] +; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]]) +; SCALAR_EPILOGUE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP7]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison) ; SCALAR_EPILOGUE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> [[WIDE_MASKED_VEC]]) -; SCALAR_EPILOGUE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0 -; SCALAR_EPILOGUE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1 -; SCALAR_EPILOGUE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2 -; SCALAR_EPILOGUE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3 +; SCALAR_EPILOGUE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0 +; SCALAR_EPILOGUE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1 +; SCALAR_EPILOGUE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2 +; SCALAR_EPILOGUE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3 +; SCALAR_EPILOGUE-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]]) +; SCALAR_EPILOGUE-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP12]] ; SCALAR_EPILOGUE-NEXT: [[TMP14:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP11]]) ; SCALAR_EPILOGUE-NEXT: [[TMP15:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP14]] -; 
SCALAR_EPILOGUE-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP13]]) -; SCALAR_EPILOGUE-NEXT: [[TMP17:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP16]] -; SCALAR_EPILOGUE-NEXT: [[TMP18:%.*]] = sext i32 [[TMP7]] to i64 -; SCALAR_EPILOGUE-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP18]] -; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP14]], <vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP16]], <vscale x 16 x i8> [[TMP17]]) -; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]]) -; SCALAR_EPILOGUE-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP19]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]]) -; SCALAR_EPILOGUE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] +; SCALAR_EPILOGUE-NEXT: [[TMP16:%.*]] = sext i32 [[TMP5]] to i64 +; SCALAR_EPILOGUE-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP16]] +; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]], <vscale x 16 x i8> [[TMP15]]) +; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]]) +; SCALAR_EPILOGUE-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]]) +; SCALAR_EPILOGUE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]] ; SCALAR_EPILOGUE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]] -; SCALAR_EPILOGUE-NEXT: [[TMP20:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; SCALAR_EPILOGUE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; SCALAR_EPILOGUE-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; SCALAR_EPILOGUE-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; SCALAR_EPILOGUE: middle.block: ; SCALAR_EPILOGUE-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0 ; SCALAR_EPILOGUE-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] @@ -248,43 +242,41 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali ; PREDICATED_DATA-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023 ; PREDICATED_DATA-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]] ; PREDICATED_DATA-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] -; PREDICATED_DATA-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() -; PREDICATED_DATA-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4 ; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0 ; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer -; PREDICATED_DATA-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32() -; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 
[[TMP3]], i64 0 +; PREDICATED_DATA-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32() +; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0 ; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer ; PREDICATED_DATA-NEXT: br label [[VECTOR_BODY:%.*]] ; PREDICATED_DATA: vector.body: ; PREDICATED_DATA-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; PREDICATED_DATA-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; PREDICATED_DATA-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; PREDICATED_DATA-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 1024) -; PREDICATED_DATA-NEXT: [[TMP5:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] -; PREDICATED_DATA-NEXT: [[TMP6:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1> zeroinitializer -; PREDICATED_DATA-NEXT: [[TMP7:%.*]] = shl i32 [[INDEX]], 2 -; PREDICATED_DATA-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64 -; PREDICATED_DATA-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP8]] -; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]]) -; PREDICATED_DATA-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP9]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison) +; PREDICATED_DATA-NEXT: [[TMP3:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] +; PREDICATED_DATA-NEXT: [[TMP4:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1> zeroinitializer +; PREDICATED_DATA-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 2 +; PREDICATED_DATA-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64 +; PREDICATED_DATA-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]] +; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]]) +; PREDICATED_DATA-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP7]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison) ; PREDICATED_DATA-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> [[WIDE_MASKED_VEC]]) -; PREDICATED_DATA-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0 -; PREDICATED_DATA-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1 -; PREDICATED_DATA-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2 -; PREDICATED_DATA-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, 
<vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3 +; PREDICATED_DATA-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0 +; PREDICATED_DATA-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1 +; PREDICATED_DATA-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2 +; PREDICATED_DATA-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3 +; PREDICATED_DATA-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]]) +; PREDICATED_DATA-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP12]] ; PREDICATED_DATA-NEXT: [[TMP14:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP11]]) ; PREDICATED_DATA-NEXT: [[TMP15:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP14]] -; PREDICATED_DATA-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP13]]) -; PREDICATED_DATA-NEXT: [[TMP17:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP16]] -; PREDICATED_DATA-NEXT: [[TMP18:%.*]] = sext i32 [[TMP7]] to i64 -; PREDICATED_DATA-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP18]] -; PREDICATED_DATA-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP14]], <vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP16]], <vscale x 16 x i8> [[TMP17]]) -; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]]) -; PREDICATED_DATA-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP19]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]]) -; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] +; PREDICATED_DATA-NEXT: [[TMP16:%.*]] = sext i32 [[TMP5]] to i64 +; PREDICATED_DATA-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP16]] +; PREDICATED_DATA-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]], <vscale x 16 x i8> [[TMP15]]) +; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]]) +; PREDICATED_DATA-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]]) +; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP1]] ; PREDICATED_DATA-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]] -; PREDICATED_DATA-NEXT: [[TMP20:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; PREDICATED_DATA-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; PREDICATED_DATA-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; PREDICATED_DATA-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP4:![0-9]+]] ; PREDICATED_DATA: middle.block: ; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]] ; PREDICATED_DATA: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll index 93e0f90..f731d39 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll @@ -15,7 +15,6 @@ define void @load_store(ptr %p) { ; LMUL1-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() ; LMUL1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] ; LMUL1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; LMUL1-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() ; LMUL1-NEXT: br label [[VECTOR_BODY:%.*]] ; LMUL1: vector.body: ; LMUL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -23,7 +22,7 @@ define void @load_store(ptr %p) { ; LMUL1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 1 x i64>, ptr [[TMP3]], align 8 ; LMUL1-NEXT: [[TMP5:%.*]] = add <vscale x 1 x i64> [[WIDE_LOAD]], splat (i64 1) ; LMUL1-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[TMP3]], align 8 -; LMUL1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; LMUL1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] ; LMUL1-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; LMUL1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; LMUL1: middle.block: @@ -46,124 +45,112 @@ define void @load_store(ptr %p) { ; ; LMUL2-LABEL: @load_store( ; LMUL2-NEXT: entry: -; LMUL2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; LMUL2-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; LMUL2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; LMUL2-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; LMUL2-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; LMUL2: vector.ph: -; LMUL2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; LMUL2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; LMUL2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; LMUL2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; LMUL2-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; LMUL2-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2 ; LMUL2-NEXT: br label [[VECTOR_BODY:%.*]] ; LMUL2: vector.body: ; LMUL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; LMUL2-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; LMUL2-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; LMUL2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[INDEX]] -; LMUL2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP5]], align 8 +; LMUL2-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]]) ; LMUL2-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 1) -; LMUL2-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP5]], align 8 -; LMUL2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] -; LMUL2-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; LMUL2-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; LMUL2-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP7]], ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]]) +; LMUL2-NEXT: [[TMP10:%.*]] = zext i32 
[[TMP6]] to i64 +; LMUL2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] +; LMUL2-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] +; LMUL2-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; LMUL2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; LMUL2: middle.block: -; LMUL2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; LMUL2-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; LMUL2-NEXT: br label [[FOR_END:%.*]] ; LMUL2: scalar.ph: -; LMUL2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; LMUL2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; LMUL2-NEXT: br label [[FOR_BODY:%.*]] ; LMUL2: for.body: -; LMUL2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; LMUL2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; LMUL2-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]] ; LMUL2-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8 ; LMUL2-NEXT: [[W:%.*]] = add i64 [[V]], 1 ; LMUL2-NEXT: store i64 [[W]], ptr [[Q]], align 8 ; LMUL2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; LMUL2-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; LMUL2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; LMUL2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; LMUL2: for.end: ; LMUL2-NEXT: ret void ; ; LMUL4-LABEL: @load_store( ; LMUL4-NEXT: entry: -; LMUL4-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; LMUL4-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; LMUL4-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; LMUL4-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; LMUL4-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; LMUL4: vector.ph: -; LMUL4-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; LMUL4-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; LMUL4-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; LMUL4-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; LMUL4-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; LMUL4-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 ; LMUL4-NEXT: br label [[VECTOR_BODY:%.*]] ; LMUL4: vector.body: ; LMUL4-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; LMUL4-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; LMUL4-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; LMUL4-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[INDEX]] -; LMUL4-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i64>, ptr [[TMP5]], align 8 +; LMUL4-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(ptr align 8 [[TMP5]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]]) ; LMUL4-NEXT: [[TMP7:%.*]] = add <vscale x 4 x i64> [[WIDE_LOAD]], splat (i64 1) -; LMUL4-NEXT: store <vscale x 4 x i64> [[TMP7]], ptr [[TMP5]], align 8 -; LMUL4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] -; LMUL4-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; LMUL4-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; LMUL4-NEXT: call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> [[TMP7]], ptr align 8 [[TMP5]], <vscale x 4 x i1> splat (i1 true), 
i32 [[TMP6]]) +; LMUL4-NEXT: [[TMP10:%.*]] = zext i32 [[TMP6]] to i64 +; LMUL4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] +; LMUL4-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] +; LMUL4-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; LMUL4-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; LMUL4: middle.block: -; LMUL4-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; LMUL4-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; LMUL4-NEXT: br label [[FOR_END:%.*]] ; LMUL4: scalar.ph: -; LMUL4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; LMUL4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; LMUL4-NEXT: br label [[FOR_BODY:%.*]] ; LMUL4: for.body: -; LMUL4-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; LMUL4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; LMUL4-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]] ; LMUL4-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8 ; LMUL4-NEXT: [[W:%.*]] = add i64 [[V]], 1 ; LMUL4-NEXT: store i64 [[W]], ptr [[Q]], align 8 ; LMUL4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; LMUL4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; LMUL4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; LMUL4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; LMUL4: for.end: ; LMUL4-NEXT: ret void ; ; LMUL8-LABEL: @load_store( ; LMUL8-NEXT: entry: -; LMUL8-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; LMUL8-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 -; LMUL8-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; LMUL8-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; LMUL8-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; LMUL8: vector.ph: -; LMUL8-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; LMUL8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 -; LMUL8-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; LMUL8-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; LMUL8-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; LMUL8-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 8 ; LMUL8-NEXT: br label [[VECTOR_BODY:%.*]] ; LMUL8: vector.body: ; LMUL8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; LMUL8-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; LMUL8-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) ; LMUL8-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[INDEX]] -; LMUL8-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP5]], align 8 +; LMUL8-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr align 8 [[TMP5]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]]) ; LMUL8-NEXT: [[TMP7:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], splat (i64 1) -; LMUL8-NEXT: store <vscale x 8 x i64> [[TMP7]], ptr [[TMP5]], align 8 -; LMUL8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] -; LMUL8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; LMUL8-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; LMUL8-NEXT: call void @llvm.vp.store.nxv8i64.p0(<vscale x 8 x i64> [[TMP7]], ptr 
align 8 [[TMP5]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]]) +; LMUL8-NEXT: [[TMP10:%.*]] = zext i32 [[TMP6]] to i64 +; LMUL8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] +; LMUL8-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] +; LMUL8-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; LMUL8-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; LMUL8: middle.block: -; LMUL8-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; LMUL8-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; LMUL8-NEXT: br label [[FOR_END:%.*]] ; LMUL8: scalar.ph: -; LMUL8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; LMUL8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; LMUL8-NEXT: br label [[FOR_BODY:%.*]] ; LMUL8: for.body: -; LMUL8-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; LMUL8-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; LMUL8-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]] ; LMUL8-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8 ; LMUL8-NEXT: [[W:%.*]] = add i64 [[V]], 1 ; LMUL8-NEXT: store i64 [[W]], ptr [[Q]], align 8 ; LMUL8-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; LMUL8-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; LMUL8-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; LMUL8-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; LMUL8: for.end: ; LMUL8-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll index 32cb426..3de3943 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll @@ -123,12 +123,6 @@ define void @trip8_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 8, [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -185,13 +179,16 @@ define void @trip16_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP3]], 8 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1:%.*]], align 1 -; CHECK-NEXT: [[TMP2:%.*]] = shl <16 x i8> [[WIDE_LOAD]], splat (i8 1) -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP4:%.*]], align 1 -; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i8> [[TMP2]], [[WIDE_LOAD1]] -; CHECK-NEXT: store <16 x i8> [[TMP5]], ptr [[TMP4]], align 1 +; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 16, i32 8, i1 true) +; 
CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr align 1 [[TMP1:%.*]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP5]]) +; CHECK-NEXT: [[TMP6:%.*]] = shl <vscale x 8 x i8> [[VP_OP_LOAD]], splat (i8 1) +; CHECK-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr align 1 [[TMP4:%.*]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP5]]) +; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 8 x i8> [[TMP6]], [[VP_OP_LOAD1]] +; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP11]], ptr align 1 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP5]]) ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] @@ -199,7 +196,7 @@ define void @trip16_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I_08]] ; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1 @@ -239,13 +236,16 @@ define void @trip32_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP3]], 16 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[TMP1:%.*]], align 1 -; CHECK-NEXT: [[TMP2:%.*]] = shl <32 x i8> [[WIDE_LOAD]], splat (i8 1) -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <32 x i8>, ptr [[TMP4:%.*]], align 1 -; CHECK-NEXT: [[TMP5:%.*]] = add <32 x i8> [[TMP2]], [[WIDE_LOAD1]] -; CHECK-NEXT: store <32 x i8> [[TMP5]], ptr [[TMP4]], align 1 +; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 32, i32 16, i1 true) +; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP1:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]]) +; CHECK-NEXT: [[TMP6:%.*]] = shl <vscale x 16 x i8> [[VP_OP_LOAD]], splat (i8 1) +; CHECK-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP4:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]]) +; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 16 x i8> [[TMP6]], [[VP_OP_LOAD1]] +; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP11]], ptr align 1 [[TMP4]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]]) ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] @@ -253,7 +253,7 @@ define void @trip32_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I_08]] ; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: 
[[MUL:%.*]] = shl i8 [[TMP7]], 1 @@ -292,26 +292,24 @@ define void @trip24_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP1]], align 1 -; CHECK-NEXT: [[TMP3:%.*]] = shl <8 x i8> [[WIDE_LOAD]], splat (i8 1) -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP4]], align 1 -; CHECK-NEXT: [[TMP6:%.*]] = add <8 x i8> [[TMP3]], [[WIDE_LOAD1]] -; CHECK-NEXT: store <8 x i8> [[TMP6]], ptr [[TMP4]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 -; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24 -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 24, i32 16, i1 true) +; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[SRC:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]]) +; CHECK-NEXT: [[TMP6:%.*]] = shl <vscale x 16 x i8> [[VP_OP_LOAD]], splat (i8 1) +; CHECK-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[DST:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]]) +; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 16 x i8> [[TMP6]], [[VP_OP_LOAD1]] +; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP7]], ptr align 1 [[DST]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]]) +; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I_08]] ; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP8]], 1 @@ -321,7 +319,7 @@ define void @trip24_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1 ; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 24 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -360,35 +358,31 @@ define i8 @mul_non_pow_2_low_trip_count(ptr noalias %a) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i8> [ <i8 2, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 
1, i8 1, i8 1, i8 1, i8 1, i8 1>, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[INDEX]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer -; CHECK-NEXT: [[VEC_IV:%.*]] = add <16 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15> -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = icmp ule <16 x i64> [[VEC_IV]], splat (i64 9) +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i8> [ <i8 2, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison) -; CHECK-NEXT: [[TMP2]] = mul <16 x i8> [[WIDE_MASKED_LOAD]], [[VEC_PHI]] -; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> [[TMP2]], <16 x i8> [[VEC_PHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 +; CHECK-NEXT: [[TMP1]] = mul <8 x i8> [[WIDE_LOAD]], [[VEC_PHI]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 8 +; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP4:%.*]] = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> [[TMP3]]) -; CHECK-NEXT: br label [[FOR_END:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> [[TMP1]]) +; CHECK-NEXT: br label [[SCALAR_PH]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ 2, [[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 8, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ [[TMP3]], [[MIDDLE_BLOCK]] ], [ 2, [[ENTRY]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ 2, [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[GEP]], align 1 ; CHECK-NEXT: [[MUL]] = mul i8 [[TMP5]], [[RDX]] ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 10 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: for.end: -; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i8 [ [[MUL]], [[FOR_BODY]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i8 [ [[MUL]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i8 [[MUL_LCSSA]] ; entry: diff --git 
a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll index 10ba208..58506f7 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll @@ -11,15 +11,8 @@ target triple = "riscv64" define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) { ; VLENUNK-LABEL: @test( ; VLENUNK-NEXT: entry: -; VLENUNK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; VLENUNK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; VLENUNK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; VLENUNK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; VLENUNK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; VLENUNK: vector.ph: -; VLENUNK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; VLENUNK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; VLENUNK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; VLENUNK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; VLENUNK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; VLENUNK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; VLENUNK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[V:%.*]], i64 0 @@ -27,32 +20,41 @@ define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) { ; VLENUNK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; VLENUNK-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 1) ; VLENUNK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP8]] -; VLENUNK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP5]] -; VLENUNK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0 -; VLENUNK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; VLENUNK-NEXT: br label [[VECTOR_BODY:%.*]] ; VLENUNK: vector.body: ; VLENUNK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; VLENUNK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; VLENUNK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; VLENUNK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; VLENUNK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0 +; VLENUNK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer +; VLENUNK-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64 +; VLENUNK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP15]] +; VLENUNK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0 +; VLENUNK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer +; VLENUNK-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() +; VLENUNK-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 4 x i32> [[TMP10]], [[BROADCAST_SPLAT4]] ; VLENUNK-NEXT: [[TMP13:%.*]] = icmp ult <vscale x 4 x i64> [[VEC_IND]], splat (i64 512) +; VLENUNK-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP11]], <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> zeroinitializer ; VLENUNK-NEXT: 
[[TMP14:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]] -; VLENUNK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP14]], i32 4, <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i32> poison) -; VLENUNK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i32> zeroinitializer +; VLENUNK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> [[TMP13]], i32 [[TMP7]]) +; VLENUNK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i32> zeroinitializer ; VLENUNK-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[PREDPHI]], [[BROADCAST_SPLAT]] ; VLENUNK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] -; VLENUNK-NEXT: store <vscale x 4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; VLENUNK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; VLENUNK-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP17]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) +; VLENUNK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64 +; VLENUNK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]] +; VLENUNK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]] ; VLENUNK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] -; VLENUNK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VLENUNK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; VLENUNK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VLENUNK: middle.block: -; VLENUNK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; VLENUNK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; VLENUNK-NEXT: br label [[FOR_END:%.*]] ; VLENUNK: scalar.ph: -; VLENUNK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; VLENUNK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; VLENUNK-NEXT: br label [[FOR_BODY:%.*]] ; VLENUNK: for.body: -; VLENUNK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] +; VLENUNK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] ; VLENUNK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[IV]], 512 ; VLENUNK-NEXT: br i1 [[ICMP]], label [[DO_LOAD:%.*]], label [[LATCH]] ; VLENUNK: do_load: @@ -66,7 +68,7 @@ define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) { ; VLENUNK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 ; VLENUNK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; VLENUNK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; VLENUNK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; VLENUNK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; VLENUNK: for.end: ; VLENUNK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll index 6800a93..f853cf1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll @@ -16,11 +16,7 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture readonly %trigger) local_unnamed_addr #0 { ; RV32-LABEL: @foo4( ; RV32-NEXT: entry: -; 
RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) -; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 625, [[TMP2]] -; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] +; RV32-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] ; RV32: vector.memcheck: ; RV32-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 79880 ; RV32-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i32 39940 @@ -34,42 +30,41 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; RV32-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]] ; RV32-NEXT: br i1 [[CONFLICT_RDX]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; RV32: vector.ph: -; RV32-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; RV32-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 -; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 625, [[TMP4]] -; RV32-NEXT: [[N_VEC:%.*]] = sub i64 625, [[N_MOD_VF]] ; RV32-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; RV32-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2 -; RV32-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 16 ; RV32-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; RV32-NEXT: [[TMP9:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 16) ; RV32-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP9]] -; RV32-NEXT: [[TMP12:%.*]] = mul i64 16, [[TMP6]] -; RV32-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP12]], i64 0 -; RV32-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; RV32-NEXT: br label [[VECTOR_BODY:%.*]] ; RV32: vector.body: ; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; RV32-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 625, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; RV32-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; RV32-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64 +; RV32-NEXT: [[TMP11:%.*]] = mul i64 16, [[TMP8]] +; RV32-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0 +; RV32-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], <vscale x 2 x i64> [[VEC_IND]] -; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> [[TMP13]], i32 4, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i32> poison), !alias.scope [[META0:![0-9]+]] +; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP13]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]] ; RV32-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100) ; RV32-NEXT: [[TMP15:%.*]] = shl nuw nsw <vscale x 2 x i64> [[VEC_IND]], splat (i64 1) ; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[B]], <vscale x 2 x i64> [[TMP15]] -; RV32-NEXT: 
[[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[TMP16]], i32 8, <vscale x 2 x i1> [[TMP14]], <vscale x 2 x double> poison), !alias.scope [[META3:![0-9]+]] +; RV32-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP16]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META3:![0-9]+]] ; RV32-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double> ; RV32-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]] ; RV32-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]] -; RV32-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> [[TMP19]], i32 8, <vscale x 2 x i1> [[TMP14]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]] -; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; RV32-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> align 8 [[TMP19]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]] +; RV32-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64 +; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]] +; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] ; RV32-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]] -; RV32-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV32-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; RV32-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 625 +; RV32-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; RV32: middle.block: -; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 625, [[N_VEC]] -; RV32-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; RV32-NEXT: br label [[FOR_END:%.*]] ; RV32: scalar.ph: -; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] +; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] ; RV32-NEXT: br label [[FOR_BODY:%.*]] ; RV32: for.body: ; RV32-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ] @@ -89,17 +84,13 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; RV32: for.inc: ; RV32-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 16 ; RV32-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 10000 -; RV32-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP11:![0-9]+]] +; RV32-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP12:![0-9]+]] ; RV32: for.end: ; RV32-NEXT: ret void ; ; RV64-LABEL: @foo4( ; RV64-NEXT: entry: -; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) -; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 625, [[TMP2]] -; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] +; RV64-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] ; RV64: vector.memcheck: ; RV64-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 79880 ; RV64-NEXT: 
[[SCEVGEP1:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i64 39940 @@ -113,42 +104,41 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; RV64-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]] ; RV64-NEXT: br i1 [[CONFLICT_RDX]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; RV64: vector.ph: -; RV64-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 -; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 625, [[TMP4]] -; RV64-NEXT: [[N_VEC:%.*]] = sub i64 625, [[N_MOD_VF]] ; RV64-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; RV64-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2 -; RV64-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 16 ; RV64-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; RV64-NEXT: [[TMP9:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 16) ; RV64-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP9]] -; RV64-NEXT: [[TMP12:%.*]] = mul i64 16, [[TMP6]] -; RV64-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP12]], i64 0 -; RV64-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; RV64-NEXT: br label [[VECTOR_BODY:%.*]] ; RV64: vector.body: ; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; RV64-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 625, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; RV64-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; RV64-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64 +; RV64-NEXT: [[TMP11:%.*]] = mul i64 16, [[TMP8]] +; RV64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0 +; RV64-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], <vscale x 2 x i64> [[VEC_IND]] -; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> [[TMP13]], i32 4, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i32> poison), !alias.scope [[META0:![0-9]+]] +; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP13]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]] ; RV64-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100) ; RV64-NEXT: [[TMP15:%.*]] = shl nuw nsw <vscale x 2 x i64> [[VEC_IND]], splat (i64 1) ; RV64-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[B]], <vscale x 2 x i64> [[TMP15]] -; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[TMP16]], i32 8, <vscale x 2 x i1> [[TMP14]], <vscale x 2 x double> poison), !alias.scope [[META3:![0-9]+]] +; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP16]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META3:![0-9]+]] ; RV64-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double> ; RV64-NEXT: 
[[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]] ; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]] -; RV64-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> [[TMP19]], i32 8, <vscale x 2 x i1> [[TMP14]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]] -; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; RV64-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> align 8 [[TMP19]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]] +; RV64-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64 +; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]] +; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] ; RV64-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]] -; RV64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV64-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; RV64-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 625 +; RV64-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; RV64: middle.block: -; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 625, [[N_VEC]] -; RV64-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; RV64-NEXT: br label [[FOR_END:%.*]] ; RV64: scalar.ph: -; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] +; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] ; RV64-NEXT: br label [[FOR_BODY:%.*]] ; RV64: for.body: ; RV64-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ] @@ -168,7 +158,7 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; RV64: for.inc: ; RV64-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 16 ; RV64-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 10000 -; RV64-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP11:![0-9]+]] +; RV64-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP12:![0-9]+]] ; RV64: for.end: ; RV64-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll index ee6b950..bbd78a4 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll @@ -1,8 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph:" --version 4 -; RUN: opt -passes=loop-vectorize -mattr=+v -S < %s | FileCheck %s --check-prefixes=CHECK,V -; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -S < %s | FileCheck %s --check-prefixes=CHECK,ZVQDOTQ -; RUN: opt -passes=loop-vectorize -mattr=+v -scalable-vectorization=off -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-V -; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -scalable-vectorization=off -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-ZVQDOTQ +; RUN: opt -passes=loop-vectorize -mattr=+v -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s 
--check-prefixes=CHECK,V +; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s --check-prefixes=CHECK,ZVQDOTQ +; RUN: opt -passes=loop-vectorize -mattr=+v -scalable-vectorization=off -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-V +; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -scalable-vectorization=off -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-ZVQDOTQ + +; TODO: Remove -prefer-predicate-over-epilogue=scalar-epilogue when partial reductions with EVL tail folding is supported. target triple = "riscv64-none-unknown-elf" @@ -19,8 +21,6 @@ define i32 @vqdot(ptr %a, ptr %b) #0 { ; V-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; V-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; V-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; V-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; V-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; V-NEXT: br label [[VECTOR_BODY:%.*]] ; V: vector.body: ; V-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -33,7 +33,7 @@ define i32 @vqdot(ptr %a, ptr %b) #0 { ; V-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> ; V-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]] ; V-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP12]], [[VEC_PHI]] -; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; V: middle.block: @@ -54,8 +54,6 @@ define i32 @vqdot(ptr %a, ptr %b) #0 { ; ZVQDOTQ-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; ZVQDOTQ-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; ZVQDOTQ-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; ZVQDOTQ-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; ZVQDOTQ-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; ZVQDOTQ-NEXT: br label [[VECTOR_BODY:%.*]] ; ZVQDOTQ: vector.body: ; ZVQDOTQ-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -68,7 +66,7 @@ define i32 @vqdot(ptr %a, ptr %b) #0 { ; ZVQDOTQ-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> ; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]] ; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.experimental.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]]) -; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; ZVQDOTQ-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; ZVQDOTQ-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; ZVQDOTQ: middle.block: @@ -183,8 +181,6 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; V-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; V-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; V-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; V-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; V-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; V-NEXT: br label [[VECTOR_BODY:%.*]] ; V: vector.body: ; V-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -197,7 +193,7 
@@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; V-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> ; V-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]] ; V-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP12]], [[VEC_PHI]] -; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; V: middle.block: @@ -218,8 +214,6 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; ZVQDOTQ-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; ZVQDOTQ-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; ZVQDOTQ-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; ZVQDOTQ-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; ZVQDOTQ-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; ZVQDOTQ-NEXT: br label [[VECTOR_BODY:%.*]] ; ZVQDOTQ: vector.body: ; ZVQDOTQ-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -232,7 +226,7 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; ZVQDOTQ-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> ; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]] ; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.experimental.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]]) -; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; ZVQDOTQ-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; ZVQDOTQ-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; ZVQDOTQ: middle.block: @@ -347,8 +341,6 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; V-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; V-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; V-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; V-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; V-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; V-NEXT: br label [[VECTOR_BODY:%.*]] ; V: vector.body: ; V-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -361,7 +353,7 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; V-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> ; V-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]] ; V-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP12]], [[VEC_PHI]] -; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; V: middle.block: @@ -382,8 +374,6 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; ZVQDOTQ-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; ZVQDOTQ-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; ZVQDOTQ-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; ZVQDOTQ-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; ZVQDOTQ-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; ZVQDOTQ-NEXT: br label [[VECTOR_BODY:%.*]] ; ZVQDOTQ: vector.body: ; ZVQDOTQ-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -396,7 +386,7 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; ZVQDOTQ-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> 
[[WIDE_LOAD1]] to <vscale x 4 x i32> ; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]] ; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.experimental.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]]) -; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; ZVQDOTQ-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; ZVQDOTQ-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; ZVQDOTQ: middle.block: @@ -510,8 +500,6 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; V-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; V-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; V-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; V-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; V-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; V-NEXT: br label [[VECTOR_BODY:%.*]] ; V: vector.body: ; V-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -524,7 +512,7 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; V-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> ; V-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]] ; V-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP12]], [[VEC_PHI]] -; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; V: middle.block: @@ -545,8 +533,6 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; ZVQDOTQ-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; ZVQDOTQ-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; ZVQDOTQ-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; ZVQDOTQ-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; ZVQDOTQ-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; ZVQDOTQ-NEXT: br label [[VECTOR_BODY:%.*]] ; ZVQDOTQ: vector.body: ; ZVQDOTQ-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -559,7 +545,7 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; ZVQDOTQ-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> ; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]] ; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.experimental.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]]) -; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; ZVQDOTQ-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; ZVQDOTQ-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; ZVQDOTQ: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll index b5b62d0..c95adf2 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll @@ -9,15 +9,8 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64 ; CHECK-LABEL: define void 
@pr87378_vpinstruction_or_drop_poison_generating_flags( ; CHECK-SAME: ptr [[ARG:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1001, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1001, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1001, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[A]], i64 0 @@ -29,39 +22,49 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64 ; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64() ; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 8 x i64> [[TMP6]], splat (i64 1) ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP7]] -; CHECK-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP8]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1001, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP25]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT7]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer +; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP25]] to i64 +; CHECK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP8]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP9]], i64 0 +; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32() +; CHECK-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 8 x i32> [[TMP10]], [[BROADCAST_SPLAT8]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP28:%.*]] = select <vscale x 8 x i1> [[TMP11]], <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i1> zeroinitializer ; CHECK-NEXT: [[TMP14:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]] -; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i1> zeroinitializer +; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 8 x i1> [[TMP28]], <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i1> zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = 
xor <vscale x 8 x i1> [[TMP13]], splat (i1 true) -; CHECK-NEXT: [[TMP17:%.*]] = or <vscale x 8 x i1> [[TMP15]], [[TMP16]] +; CHECK-NEXT: [[TMP29:%.*]] = select <vscale x 8 x i1> [[TMP11]], <vscale x 8 x i1> [[TMP16]], <vscale x 8 x i1> zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = or <vscale x 8 x i1> [[TMP15]], [[TMP29]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT4]] ; CHECK-NEXT: [[TMP19:%.*]] = select <vscale x 8 x i1> [[TMP17]], <vscale x 8 x i1> [[TMP18]], <vscale x 8 x i1> zeroinitializer ; CHECK-NEXT: [[TMP20:%.*]] = xor <vscale x 8 x i1> [[TMP14]], splat (i1 true) -; CHECK-NEXT: [[TMP21:%.*]] = select <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i1> [[TMP20]], <vscale x 8 x i1> zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = select <vscale x 8 x i1> [[TMP28]], <vscale x 8 x i1> [[TMP20]], <vscale x 8 x i1> zeroinitializer ; CHECK-NEXT: [[TMP22:%.*]] = or <vscale x 8 x i1> [[TMP19]], [[TMP21]] ; CHECK-NEXT: [[TMP23:%.*]] = extractelement <vscale x 8 x i1> [[TMP21]], i32 0 ; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP23]], i64 poison, i64 [[INDEX]] ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i16, ptr [[ARG]], i64 [[PREDPHI]] -; CHECK-NEXT: call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> zeroinitializer, ptr [[TMP24]], i32 2, <vscale x 8 x i1> [[TMP22]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> zeroinitializer, ptr align 2 [[TMP24]], <vscale x 8 x i1> [[TMP22]], i32 [[TMP25]]) +; CHECK-NEXT: [[TMP26:%.*]] = zext i32 [[TMP25]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP26]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP26]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1001 +; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1001, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] ; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] ; CHECK-NEXT: [[C_1:%.*]] = icmp ule i64 [[IV]], [[A]] ; CHECK-NEXT: br i1 [[C_1]], label [[THEN_1:%.*]], label [[ELSE_1:%.*]] ; CHECK: then.1: @@ -80,7 +83,7 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64 ; CHECK: loop.latch: ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[IV]], 1000 -; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -118,8 +121,9 @@ exit: ret void } ;. 
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
index d41d47a..ca94fce 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
@@ -7,12 +7,6 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH1:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 9, [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i8> poison, i8 [[B]], i64 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
index 554ce7b..2028df7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
@@ -9,46 +9,43 @@ define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @add(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
@@ -74,46 +71,43 @@ define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @or(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = or <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = or <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[OR:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[OR:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[OR]] = or i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[OR_LCSSA]]
;
entry:
@@ -139,46 +133,43 @@ define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @and(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> splat (i32 -1), i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> splat (i32 -1), i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = and <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = and <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[AND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[AND:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[AND]] = and i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[AND_LCSSA]]
;
entry:
@@ -204,46 +195,43 @@ define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @xor(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = xor <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = xor <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[XOR]] = xor i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[XOR_LCSSA]]
;
entry:
@@ -269,48 +257,45 @@ define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @smin(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP8]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP11]], [[SUM_010]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -337,48 +322,45 @@ define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @umax(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP8]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP11]], [[SUM_010]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -405,46 +387,43 @@ define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK-LABEL: define float @fadd_fast(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP7]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[ADD_LCSSA]]
;
entry:
@@ -468,46 +447,43 @@ define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "tar
; CHECK-LABEL: define half @fadd_fast_half_zvfh(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = fadd fast <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP7]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load half, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[ADD_LCSSA]]
;
entry:
@@ -549,7 +525,7 @@ define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) "
; CHECK-NEXT: [[TMP3]] = fadd fast <16 x half> [[WIDE_LOAD2]], [[VEC_PHI1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x half> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> [[BIN_RDX]])
@@ -567,7 +543,7 @@ define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) "
; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP6]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[ADD_LCSSA]]
@@ -611,7 +587,7 @@ define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "targ
; CHECK-NEXT: [[TMP3]] = fadd fast <16 x bfloat> [[WIDE_LOAD2]], [[VEC_PHI1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x bfloat> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call fast bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR0000, <16 x bfloat> [[BIN_RDX]])
@@ -629,7 +605,7 @@ define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "targ
; CHECK-NEXT: [[ADD]] = fadd fast bfloat [[TMP6]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi bfloat [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret bfloat [[ADD_LCSSA]]
@@ -657,48 +633,45 @@ define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-LABEL: define float @fmin_fast(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR4:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP8]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt float [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -723,48 +696,45 @@ define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #
; CHECK-LABEL: define half @fmin_fast_half_zvfhmin(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR5:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP8]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt half [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -789,48 +759,45 @@ define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64
; CHECK-LABEL: define bfloat @fmin_fast_bfloat_zvfbfmin(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR6:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 8 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x bfloat> @llvm.vp.merge.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> [[TMP8]], <vscale x 8 x bfloat> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ 0xR0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt bfloat [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -857,48 +824,45 @@ define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-LABEL: define float @fmax_fast(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP8]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [
0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt float [[TMP11]], [[SUM_07]] ; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]] ; entry: @@ -923,48 +887,45 @@ define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) # ; CHECK-LABEL: define half @fmax_fast_half_zvfhmin( ; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR5]] { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]]) ; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]] -; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label 
%[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]] +; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP8]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP14]]) +; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP10:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> [[TMP8]]) -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: [[TMP12:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> [[TMP9]]) +; CHECK-NEXT: br label %[[FOR_END:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt half [[TMP11]], [[SUM_07]] ; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]] ; entry: @@ -989,48 +950,45 @@ define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 ; CHECK-LABEL: define bfloat @fmax_fast_bfloat_zvfbfmin( ; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR6]] { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] -; CHECK-NEXT: br i1 
[[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP6]], align 4 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]]) ; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 8 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]] -; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]] +; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x bfloat> @llvm.vp.merge.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> [[TMP8]], <vscale x 8 x bfloat> [[VEC_PHI]], i32 [[TMP14]]) +; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP10:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]]) -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: [[TMP12:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> [[TMP9]]) +; CHECK-NEXT: br label %[[FOR_END:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ 0xR0000, 
%[[ENTRY]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt bfloat [[TMP11]], [[SUM_07]] ; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]] ; entry: @@ -1077,7 +1035,7 @@ define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[TMP3]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = mul <8 x i32> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[BIN_RDX]]) @@ -1095,7 +1053,7 @@ define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP6]], [[SUM_07]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[MUL_LCSSA]] @@ -1142,7 +1100,7 @@ define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture re ; CHECK-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_LOAD1]], [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP5]]) ; CHECK-NEXT: 
[[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -1165,7 +1123,7 @@ define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture re ; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP9]], [[SUM]] ; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[MUL_LCSSA]] @@ -1197,40 +1155,37 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-LABEL: define float @fmuladd( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4 -; CHECK-NEXT: [[TMP8]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD1]], <vscale x 4 x float> [[VEC_PHI]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 
[[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]) +; CHECK-NEXT: [[TMP8:%.*]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD1]], <vscale x 4 x float> [[VEC_PHI]]) +; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP8]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP14]]) +; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP10:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP8]]) -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: [[TMP16:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP9]]) +; CHECK-NEXT: br label %[[FOR_END:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] @@ -1238,9 +1193,9 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[SUM_07]]) ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret float [[MULADD_LCSSA]] ; entry: @@ -1266,40 +1221,37 @@ define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh" ; CHECK-LABEL: define half @fmuladd_f16_zvfh( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = 
mul nuw i64 [[TMP0]], 8 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ insertelement (<vscale x 8 x half> splat (half 0xH8000), half 0xH0000, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ insertelement (<vscale x 8 x half> splat (half 0xH8000), half 0xH0000, i32 0), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]]) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x half>, ptr [[TMP7]], align 4 -; CHECK-NEXT: [[TMP8]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD1]], <vscale x 8 x half> [[VEC_PHI]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP7]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]]) +; CHECK-NEXT: [[TMP8:%.*]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD1]], <vscale x 8 x half> [[VEC_PHI]]) +; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP8]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP14]]) +; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP10:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[TMP8]]) -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-NEXT: br i1 
[[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: [[TMP16:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[TMP9]]) +; CHECK-NEXT: br label %[[FOR_END:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[IV]] @@ -1307,9 +1259,9 @@ define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh" ; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP11]], half [[TMP12]], half [[SUM_07]]) ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret half [[MULADD_LCSSA]] ; entry: @@ -1360,7 +1312,7 @@ define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvf ; CHECK-NEXT: [[TMP5]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD2]], <16 x half> [[WIDE_LOAD4]], <16 x half> [[VEC_PHI1]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x half> [[TMP5]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH8000, <16 x half> [[BIN_RDX]]) @@ -1380,7 +1332,7 @@ define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvf ; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP8]], half [[TMP9]], half [[SUM_07]]) ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] ; CHECK: 
[[FOR_END]]: ; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret half [[MULADD_LCSSA]] @@ -1430,7 +1382,7 @@ define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin ; CHECK-NEXT: [[TMP5]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD2]], <16 x bfloat> [[WIDE_LOAD4]], <16 x bfloat> [[VEC_PHI1]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x bfloat> [[TMP5]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = call reassoc bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR8000, <16 x bfloat> [[BIN_RDX]]) @@ -1450,7 +1402,7 @@ define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin ; CHECK-NEXT: [[MULADD]] = tail call reassoc bfloat @llvm.fmuladd.bf16(bfloat [[TMP8]], bfloat [[TMP9]], bfloat [[SUM_07]]) ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi bfloat [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret bfloat [[MULADD_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll index 5a67b54..346f1cb 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll @@ -1,5 +1,9 @@ ; REQUIRES: asserts -; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s +; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -prefer-predicate-over-epilogue=scalar-epilogue -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s + +; TODO: -prefer-predicate-over-epilogue=scalar-epilogue was added to allow +; unrolling. Calculate register pressure for all VPlans, not just unrolled ones, +; and remove. 
define void @add(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i32 signext %size, ptr noalias nocapture writeonly %result) { ; CHECK-LABEL: add diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll index d4909fa..b25bc48 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll @@ -1,6 +1,10 @@ ; REQUIRES: asserts -; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfh -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFH -; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFHMIN +; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfh -prefer-predicate-over-epilogue=scalar-epilogue -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFH +; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -prefer-predicate-over-epilogue=scalar-epilogue -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFHMIN + +; TODO: -prefer-predicate-over-epilogue=scalar-epilogue was added to allow +; unrolling. Calculate register pressure for all VPlans, not just unrolled ones, +; and remove. define void @add(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i32 signext %size, ptr noalias nocapture writeonly %result) { ; CHECK-LABEL: add diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll index 7037282..116ccc9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll @@ -1,25 +1,29 @@ ; REQUIRES: asserts ; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \ ; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \ -; RUN: -force-vector-width=1 \ +; RUN: -force-vector-width=1 -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-SCALAR ; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \ ; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \ -; RUN: -riscv-v-register-bit-width-lmul=1 \ +; RUN: -riscv-v-register-bit-width-lmul=1 -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL1 ; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \ ; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \ -; RUN: -riscv-v-register-bit-width-lmul=2 \ +; RUN: -riscv-v-register-bit-width-lmul=2 -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL2 ; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \ ; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \ -; RUN: -riscv-v-register-bit-width-lmul=4 \ +; RUN: -riscv-v-register-bit-width-lmul=4 -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL4 ; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \ ; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \ -; RUN: 
-riscv-v-register-bit-width-lmul=8 \ +; RUN: -riscv-v-register-bit-width-lmul=8 -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL8 +; TODO: -prefer-predicate-over-epilogue=scalar-epilogue was added to allow +; unrolling. Calculate register pressure for all VPlans, not just unrolled ones, +; and remove. + define void @add(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i32 signext %size, ptr noalias nocapture writeonly %result) { ; CHECK-LABEL: add ; CHECK-SCALAR: LV(REG): VF = 1 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll index 85163c7..6413ded3 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt < %s -mtriple=riscv64 -mattr=+v -p loop-vectorize -pass-remarks-analysis=loop-vectorize -S 2>&1 | FileCheck %s ; CHECK: remark: <unknown>:0:0: the cost-model indicates that interleaving is not beneficial @@ -5,41 +6,36 @@ define float @s311(float %a_0, float %s311_sum) { ; CHECK-LABEL: define float @s311( ; CHECK-SAME: float [[A_0:%.*]], float [[S311_SUM:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 1200, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 1200, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 1200, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[A_0]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi float [ [[S311_SUM]], %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP6]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[BROADCAST_SPLAT]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 1200, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: [[TMP6]] = call float @llvm.vp.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[BROADCAST_SPLAT]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP9]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = 
sub nuw i32 [[AVL]], [[TMP9]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 1200 ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 1200, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ [[S311_SUM]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[S311_SUM]], %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi float [ [[S311_SUM]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[RED_NEXT]] = fadd float [[A_0]], [[RED]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1200 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret float [[RED_LCSSA]] @@ -60,8 +56,9 @@ exit: ret float %red.lcssa } ;. -; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} -; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} +; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} +; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll index 0b3dcf8..6e58fdf 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll @@ -19,98 +19,88 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) { ; RV64-LABEL: define void @vector_reverse_i32( ; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; RV64-NEXT: [[ENTRY:.*]]: -; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]] -; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; RV64: [[VECTOR_PH]]: -; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]] -; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]] ; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; RV64-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]] ; RV64-NEXT: br label %[[VECTOR_BODY:.*]] ; RV64: [[VECTOR_BODY]]: -; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]] ; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1 ; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP7]] -; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP5]] -; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP5]], 1 +; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP19]] to i64 +; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP24]] +; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP24]], 1 ; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP10]] -; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 [[TMP9]] -; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 [[TMP11]] -; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4 -; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) +; RV64-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP8]], i64 [[TMP9]] +; RV64-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP12]], i64 [[TMP11]] +; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]]) +; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]]) ; RV64-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1) ; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP7]] -; RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]] -; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1 +; RV64-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64 +; RV64-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP16]] +; RV64-NEXT: [[TMP17:%.*]] 
= sub i64 [[TMP16]], 1 ; RV64-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]] -; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP16]] -; RV64-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP19]], i64 [[TMP18]] -; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP14]]) -; RV64-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP20]], align 4 -; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; RV64-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV64-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; RV64-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP15]], i64 [[TMP25]] +; RV64-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP20]], i64 [[TMP18]] +; RV64-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]]) +; RV64-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE1]], ptr align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]]) +; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP19]] to i64 +; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]] +; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]] +; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023 +; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; RV64: [[MIDDLE_BLOCK]]: -; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]] -; RV64-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] +; RV64-NEXT: br [[EXIT:label %.*]] ; RV64: [[SCALAR_PH]]: -; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ] ; RV64-NEXT: br label %[[FOR_BODY:.*]] ; RV64: [[FOR_BODY]]: ; ; RV32-LABEL: define void @vector_reverse_i32( ; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; RV32-NEXT: [[ENTRY:.*]]: -; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]] -; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; RV32: [[VECTOR_PH]]: -; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; RV32-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]] -; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]] ; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; RV32-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; RV32-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]] ; RV32-NEXT: br label %[[VECTOR_BODY:.*]] ; RV32: [[VECTOR_BODY]]: -; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]] ; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1 ; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr 
[[B]], i64 [[TMP7]] -; RV32-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP5]] to i32 ; RV32-NEXT: [[TMP10:%.*]] = mul i32 0, [[TMP9]] ; RV32-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], 1 ; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP11]] -; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 [[TMP10]] -; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i32 [[TMP12]] -; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4 -; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) +; RV32-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP8]], i32 [[TMP10]] +; RV32-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP13]], i32 [[TMP12]] +; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) +; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) ; RV32-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1) ; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP7]] -; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32 -; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]] -; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP17]], 1 +; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP9]] +; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP9]], 1 ; RV32-NEXT: [[TMP20:%.*]] = mul i32 -1, [[TMP19]] -; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 [[TMP18]] -; RV32-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i32 [[TMP20]] -; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP15]]) -; RV32-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP22]], align 4 -; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; RV32-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV32-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; RV32-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP16]], i32 [[TMP17]] +; RV32-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP18]], i32 [[TMP20]] +; RV32-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) +; RV32-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE1]], ptr align 4 [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) +; RV32-NEXT: [[TMP23:%.*]] = zext i32 [[TMP9]] to i64 +; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP23]], [[INDEX]] +; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]] +; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023 +; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; RV32: [[MIDDLE_BLOCK]]: -; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]] -; RV32-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] +; RV32-NEXT: br [[EXIT:label %.*]] ; RV32: [[SCALAR_PH]]: -; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ] ; RV32-NEXT: br label %[[FOR_BODY:.*]] ; RV32: [[FOR_BODY]]: ; @@ -122,14 +112,12 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) { ; 
RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]] ; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; RV64-UF2: [[VECTOR_PH]]: -; RV64-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; RV64-UF2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 -; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]] -; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]] ; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; RV64-UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 1023, [[TMP6]] ; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]] +; RV64-UF2-NEXT: [[TMP33:%.*]] = sub i64 1023, [[TMP7]] ; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]] ; RV64-UF2: [[VECTOR_BODY]]: ; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -168,13 +156,13 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) { ; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP21]]) ; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP32]], align 4 ; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] -; RV64-UF2-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV64-UF2-NEXT: br i1 [[TMP33]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; RV64-UF2-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP7]] +; RV64-UF2-NEXT: br i1 [[TMP34]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; RV64-UF2: [[MIDDLE_BLOCK]]: -; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]] +; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[TMP7]] ; RV64-UF2-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] ; RV64-UF2: [[SCALAR_PH]]: -; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP33]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] ; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]] ; RV64-UF2: [[FOR_BODY]]: ; @@ -206,10 +194,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]] ; RV64: [[FOR_BODY_PREHEADER]]: ; RV64-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4 -; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] -; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] +; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] ; RV64: [[VECTOR_SCEVCHECK]]: ; RV64-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1 ; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1 @@ -231,48 +216,46 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]] ; RV64-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; RV64: [[VECTOR_PH]]: -; RV64-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4 -; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]] -; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; RV64-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() ; 
RV64-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4 -; RV64-NEXT: [[TMP19:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; RV64-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 -; RV64-NEXT: [[TMP20:%.*]] = sub i32 [[N]], [[DOTCAST]] ; RV64-NEXT: br label %[[VECTOR_BODY:.*]] ; RV64: [[VECTOR_BODY]]: -; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[TMP20:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; RV64-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32 ; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]] ; RV64-NEXT: [[TMP21:%.*]] = add nsw i32 [[OFFSET_IDX]], -1 ; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64 ; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP22]] -; RV64-NEXT: [[TMP24:%.*]] = mul i64 0, [[TMP18]] -; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP18]], 1 +; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP20]] to i64 +; RV64-NEXT: [[TMP28:%.*]] = mul i64 0, [[TMP24]] +; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1 ; RV64-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP25]] -; RV64-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i64 [[TMP24]] -; RV64-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP27]], i64 [[TMP26]] -; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP28]], align 4 -; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) +; RV64-NEXT: [[TMP38:%.*]] = getelementptr i32, ptr [[TMP23]], i64 [[TMP28]] +; RV64-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP38]], i64 [[TMP26]] +; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP27]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]]) +; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]]) ; RV64-NEXT: [[TMP29:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1) ; RV64-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP22]] -; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP18]] -; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP18]], 1 +; RV64-NEXT: [[TMP39:%.*]] = zext i32 [[TMP20]] to i64 +; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP39]] +; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP39]], 1 ; RV64-NEXT: [[TMP33:%.*]] = mul i64 -1, [[TMP32]] -; RV64-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[TMP30]], i64 [[TMP31]] -; RV64-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[TMP34]], i64 [[TMP33]] -; RV64-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP29]]) -; RV64-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP35]], align 4 -; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] -; RV64-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV64-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; RV64-NEXT: [[TMP34:%.*]] = getelementptr i32, ptr [[TMP30]], i64 [[TMP31]] +; RV64-NEXT: [[TMP35:%.*]] = getelementptr i32, ptr [[TMP34]], i64 [[TMP33]] +; RV64-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale 
x 4 x i32> [[TMP29]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]]) +; RV64-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP35]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]]) +; RV64-NEXT: [[TMP36:%.*]] = zext i32 [[TMP20]] to i64 +; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP36]], [[INDEX]] +; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP36]] +; RV64-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]] +; RV64-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; RV64: [[MIDDLE_BLOCK]]: -; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] -; RV64-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; RV64-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]] ; RV64: [[SCALAR_PH]]: -; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP19]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] -; RV64-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ] +; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] +; RV64-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ] ; RV64-NEXT: br label %[[FOR_BODY:.*]] ; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]: ; RV64-NEXT: br label %[[FOR_COND_CLEANUP]] @@ -289,10 +272,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV32-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]] ; RV32: [[FOR_BODY_PREHEADER]]: ; RV32-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; RV32-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; RV32-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4 -; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] -; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; RV32: [[VECTOR_MEMCHECK]]: ; RV32-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() ; RV32-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4 @@ -301,50 +281,44 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV32-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i32 [[TMP6]], [[TMP5]] ; RV32-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; RV32: [[VECTOR_PH]]: -; RV32-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; RV32-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 -; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP8]] -; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; RV32-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() ; RV32-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 -; RV32-NEXT: [[TMP11:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; RV32-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 -; RV32-NEXT: [[TMP12:%.*]] = sub i32 [[N]], [[DOTCAST]] ; RV32-NEXT: br label %[[VECTOR_BODY:.*]] ; RV32: [[VECTOR_BODY]]: -; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], 
%[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; RV32-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32 ; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]] ; RV32-NEXT: [[TMP13:%.*]] = add nsw i32 [[OFFSET_IDX]], -1 ; RV32-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64 ; RV32-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP14]] -; RV32-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32 ; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP16]] ; RV32-NEXT: [[TMP18:%.*]] = sub i32 [[TMP16]], 1 ; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP18]] -; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 [[TMP17]] -; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[TMP20]], i32 [[TMP19]] -; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP21]], align 4 -; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) +; RV32-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP15]], i32 [[TMP17]] +; RV32-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP20]], i32 [[TMP19]] +; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP28]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]]) +; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]]) ; RV32-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1) ; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP14]] -; RV32-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP10]] to i32 -; RV32-NEXT: [[TMP25:%.*]] = mul i32 0, [[TMP24]] -; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP24]], 1 +; RV32-NEXT: [[TMP21:%.*]] = mul i32 0, [[TMP16]] +; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP16]], 1 ; RV32-NEXT: [[TMP27:%.*]] = mul i32 -1, [[TMP26]] -; RV32-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i32 [[TMP25]] -; RV32-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP28]], i32 [[TMP27]] -; RV32-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP22]]) -; RV32-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP29]], align 4 -; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] -; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; RV32-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP23]], i32 [[TMP21]] +; RV32-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP24]], i32 [[TMP27]] +; RV32-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]]) +; RV32-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP25]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]]) +; RV32-NEXT: [[TMP29:%.*]] = zext i32 [[TMP16]] to i64 +; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP29]], [[INDEX]] +; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP29]] +; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]] +; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; RV32: [[MIDDLE_BLOCK]]: -; RV32-NEXT: 
[[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] -; RV32-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; RV32-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]] ; RV32: [[SCALAR_PH]]: -; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP11]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] -; RV32-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP12]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ] +; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] +; RV32-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ] ; RV32-NEXT: br label %[[FOR_BODY:.*]] ; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]: ; RV32-NEXT: br label %[[FOR_COND_CLEANUP]] @@ -386,15 +360,13 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-UF2-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]] ; RV64-UF2-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; RV64-UF2: [[VECTOR_PH]]: -; RV64-UF2-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; RV64-UF2-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8 -; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]] -; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; RV64-UF2-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() ; RV64-UF2-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4 ; RV64-UF2-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 2 +; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 [[TMP0]], [[TMP19]] ; RV64-UF2-NEXT: [[TMP20:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 +; RV64-UF2-NEXT: [[TMP48:%.*]] = sub i64 [[TMP0]], [[TMP20]] +; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[TMP20]] to i32 ; RV64-UF2-NEXT: [[TMP21:%.*]] = sub i32 [[N]], [[DOTCAST]] ; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]] ; RV64-UF2: [[VECTOR_BODY]]: @@ -436,13 +408,13 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-UF2-NEXT: [[REVERSE7:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP36]]) ; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE7]], ptr [[TMP47]], align 4 ; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]] -; RV64-UF2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV64-UF2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; RV64-UF2-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP20]] +; RV64-UF2-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; RV64-UF2: [[MIDDLE_BLOCK]]: -; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] +; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[TMP20]] ; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]] ; RV64-UF2: [[SCALAR_PH]]: -; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] +; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP48]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] ; RV64-UF2-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i32 [ [[TMP21]], %[[MIDDLE_BLOCK]] ], [ [[N]], 
%[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ] ; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]] ; RV64-UF2: [[FOR_COND_CLEANUP_LOOPEXIT]]: @@ -487,10 +459,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]] ; RV64: [[FOR_BODY_PREHEADER]]: ; RV64-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4 -; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] -; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] +; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] ; RV64: [[VECTOR_SCEVCHECK]]: ; RV64-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1 ; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1 @@ -512,48 +481,46 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]] ; RV64-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; RV64: [[VECTOR_PH]]: -; RV64-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4 -; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]] -; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; RV64-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() ; RV64-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4 -; RV64-NEXT: [[TMP19:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; RV64-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 -; RV64-NEXT: [[TMP20:%.*]] = sub i32 [[N]], [[DOTCAST]] ; RV64-NEXT: br label %[[VECTOR_BODY:.*]] ; RV64: [[VECTOR_BODY]]: -; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[TMP20:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; RV64-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32 ; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]] ; RV64-NEXT: [[TMP21:%.*]] = add nsw i32 [[OFFSET_IDX]], -1 ; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64 ; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP22]] -; RV64-NEXT: [[TMP24:%.*]] = mul i64 0, [[TMP18]] -; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP18]], 1 +; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP20]] to i64 +; RV64-NEXT: [[TMP28:%.*]] = mul i64 0, [[TMP24]] +; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1 ; RV64-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP25]] -; RV64-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP24]] -; RV64-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP27]], i64 [[TMP26]] -; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP28]], align 4 -; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]]) +; RV64-NEXT: [[TMP38:%.*]] = getelementptr float, ptr [[TMP23]], i64 [[TMP28]] +; RV64-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP38]], i64 [[TMP26]] +; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP27]], <vscale x 4 x i1> splat (i1 true), 
i32 [[TMP20]]) +; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]]) ; RV64-NEXT: [[TMP29:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00) ; RV64-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP22]] -; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP18]] -; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP18]], 1 +; RV64-NEXT: [[TMP39:%.*]] = zext i32 [[TMP20]] to i64 +; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP39]] +; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP39]], 1 ; RV64-NEXT: [[TMP33:%.*]] = mul i64 -1, [[TMP32]] -; RV64-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP30]], i64 [[TMP31]] -; RV64-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP34]], i64 [[TMP33]] -; RV64-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP29]]) -; RV64-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP35]], align 4 -; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] -; RV64-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV64-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; RV64-NEXT: [[TMP34:%.*]] = getelementptr float, ptr [[TMP30]], i64 [[TMP31]] +; RV64-NEXT: [[TMP35:%.*]] = getelementptr float, ptr [[TMP34]], i64 [[TMP33]] +; RV64-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP29]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]]) +; RV64-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE3]], ptr align 4 [[TMP35]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]]) +; RV64-NEXT: [[TMP36:%.*]] = zext i32 [[TMP20]] to i64 +; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP36]], [[INDEX]] +; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP36]] +; RV64-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]] +; RV64-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; RV64: [[MIDDLE_BLOCK]]: -; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] -; RV64-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; RV64-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]] ; RV64: [[SCALAR_PH]]: -; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP19]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] -; RV64-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ] +; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] +; RV64-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ] ; RV64-NEXT: br label %[[FOR_BODY:.*]] ; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]: ; RV64-NEXT: br label %[[FOR_COND_CLEANUP]] @@ -570,10 +537,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV32-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]] ; RV32: [[FOR_BODY_PREHEADER]]: ; RV32-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; RV32-NEXT: [[TMP1:%.*]] = call 
i64 @llvm.vscale.i64() -; RV32-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4 -; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] -; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; RV32: [[VECTOR_MEMCHECK]]: ; RV32-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() ; RV32-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4 @@ -582,50 +546,44 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV32-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i32 [[TMP6]], [[TMP5]] ; RV32-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; RV32: [[VECTOR_PH]]: -; RV32-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; RV32-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 -; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP8]] -; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; RV32-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() ; RV32-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 -; RV32-NEXT: [[TMP11:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; RV32-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 -; RV32-NEXT: [[TMP12:%.*]] = sub i32 [[N]], [[DOTCAST]] ; RV32-NEXT: br label %[[VECTOR_BODY:.*]] ; RV32: [[VECTOR_BODY]]: -; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; RV32-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32 ; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]] ; RV32-NEXT: [[TMP13:%.*]] = add nsw i32 [[OFFSET_IDX]], -1 ; RV32-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64 ; RV32-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP14]] -; RV32-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32 ; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP16]] ; RV32-NEXT: [[TMP18:%.*]] = sub i32 [[TMP16]], 1 ; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP18]] -; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP17]] -; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP19]] -; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP21]], align 4 -; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]]) +; RV32-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[TMP15]], i32 [[TMP17]] +; RV32-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP20]], i32 [[TMP19]] +; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP28]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]]) +; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]]) ; RV32-NEXT: [[TMP22:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00) ; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP14]] -; RV32-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP10]] to i32 -; RV32-NEXT: [[TMP25:%.*]] = mul i32 0, [[TMP24]] -; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP24]], 1 +; RV32-NEXT: [[TMP21:%.*]] = mul i32 0, [[TMP16]] +; 
RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP16]], 1 ; RV32-NEXT: [[TMP27:%.*]] = mul i32 -1, [[TMP26]] -; RV32-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP25]] -; RV32-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i32 [[TMP27]] -; RV32-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP22]]) -; RV32-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP29]], align 4 -; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] -; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; RV32-NEXT: [[TMP24:%.*]] = getelementptr float, ptr [[TMP23]], i32 [[TMP21]] +; RV32-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[TMP24]], i32 [[TMP27]] +; RV32-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]]) +; RV32-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE3]], ptr align 4 [[TMP25]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]]) +; RV32-NEXT: [[TMP29:%.*]] = zext i32 [[TMP16]] to i64 +; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP29]], [[INDEX]] +; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP29]] +; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]] +; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; RV32: [[MIDDLE_BLOCK]]: -; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] -; RV32-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; RV32-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]] ; RV32: [[SCALAR_PH]]: -; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP11]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] -; RV32-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP12]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ] +; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] +; RV32-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ] ; RV32-NEXT: br label %[[FOR_BODY:.*]] ; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]: ; RV32-NEXT: br label %[[FOR_COND_CLEANUP]] @@ -667,15 +625,13 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-UF2-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]] ; RV64-UF2-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; RV64-UF2: [[VECTOR_PH]]: -; RV64-UF2-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; RV64-UF2-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8 -; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]] -; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; RV64-UF2-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() ; RV64-UF2-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4 ; RV64-UF2-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 2 +; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 [[TMP0]], [[TMP19]] ; RV64-UF2-NEXT: [[TMP20:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 +; RV64-UF2-NEXT: [[TMP48:%.*]] = sub i64 [[TMP0]], [[TMP20]] +; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[TMP20]] to i32 ; RV64-UF2-NEXT: 
[[TMP21:%.*]] = sub i32 [[N]], [[DOTCAST]] ; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]] ; RV64-UF2: [[VECTOR_BODY]]: @@ -717,13 +673,13 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-UF2-NEXT: [[REVERSE7:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP36]]) ; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE7]], ptr [[TMP47]], align 4 ; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]] -; RV64-UF2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV64-UF2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; RV64-UF2-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP20]] +; RV64-UF2-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; RV64-UF2: [[MIDDLE_BLOCK]]: -; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] +; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[TMP20]] ; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]] ; RV64-UF2: [[SCALAR_PH]]: -; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] +; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP48]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ] ; RV64-UF2-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i32 [ [[TMP21]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ] ; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]] ; RV64-UF2: [[FOR_COND_CLEANUP_LOOPEXIT]]: @@ -762,98 +718,88 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) { ; RV64-LABEL: define void @vector_reverse_f32_simplify( ; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] { ; RV64-NEXT: [[ENTRY:.*]]: -; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]] -; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; RV64: [[VECTOR_PH]]: -; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]] -; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]] ; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; RV64-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]] ; RV64-NEXT: br label %[[VECTOR_BODY:.*]] ; RV64: [[VECTOR_BODY]]: -; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]] ; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1 ; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP7]] -; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP5]] -; 
RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP5]], 1 +; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP19]] to i64 +; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP24]] +; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP24]], 1 ; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP10]] -; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[TMP9]] -; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[TMP11]] -; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4 -; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]]) +; RV64-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[TMP8]], i64 [[TMP9]] +; RV64-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[TMP12]], i64 [[TMP11]] +; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]]) +; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]]) ; RV64-NEXT: [[TMP14:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00) ; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP7]] -; RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]] -; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1 +; RV64-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64 +; RV64-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP16]] +; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP16]], 1 ; RV64-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]] -; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[TMP16]] -; RV64-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[TMP18]] -; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP14]]) -; RV64-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP20]], align 4 -; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; RV64-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV64-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; RV64-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[TMP15]], i64 [[TMP25]] +; RV64-NEXT: [[TMP21:%.*]] = getelementptr float, ptr [[TMP20]], i64 [[TMP18]] +; RV64-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]]) +; RV64-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE1]], ptr align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]]) +; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP19]] to i64 +; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]] +; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]] +; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023 +; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; RV64: [[MIDDLE_BLOCK]]: -; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]] -; RV64-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] +; RV64-NEXT: br [[EXIT:label %.*]] ; RV64: [[SCALAR_PH]]: -; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ] ; RV64-NEXT: br label %[[FOR_BODY:.*]] ; RV64: [[FOR_BODY]]: ; 
; RV32-LABEL: define void @vector_reverse_f32_simplify( ; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] { ; RV32-NEXT: [[ENTRY:.*]]: -; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]] -; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; RV32: [[VECTOR_PH]]: -; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; RV32-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]] -; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]] ; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; RV32-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; RV32-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]] ; RV32-NEXT: br label %[[VECTOR_BODY:.*]] ; RV32: [[VECTOR_BODY]]: -; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]] ; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1 ; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP7]] -; RV32-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP5]] to i32 ; RV32-NEXT: [[TMP10:%.*]] = mul i32 0, [[TMP9]] ; RV32-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], 1 ; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP11]] -; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 [[TMP10]] -; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP12]] -; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP14]], align 4 -; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]]) +; RV32-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[TMP8]], i32 [[TMP10]] +; RV32-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP13]], i32 [[TMP12]] +; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) +; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) ; RV32-NEXT: [[TMP15:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00) ; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP7]] -; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32 -; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]] -; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP17]], 1 +; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP9]] +; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP9]], 1 ; RV32-NEXT: [[TMP20:%.*]] = mul i32 -1, [[TMP19]] -; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP18]] -; RV32-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP20]] -; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP15]]) -; RV32-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP22]], align 4 -; RV32-NEXT: 
[[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; RV32-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV32-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; RV32-NEXT: [[TMP18:%.*]] = getelementptr float, ptr [[TMP16]], i32 [[TMP17]] +; RV32-NEXT: [[TMP22:%.*]] = getelementptr float, ptr [[TMP18]], i32 [[TMP20]] +; RV32-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) +; RV32-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE1]], ptr align 4 [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) +; RV32-NEXT: [[TMP23:%.*]] = zext i32 [[TMP9]] to i64 +; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP23]], [[INDEX]] +; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]] +; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023 +; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; RV32: [[MIDDLE_BLOCK]]: -; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]] -; RV32-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] +; RV32-NEXT: br [[EXIT:label %.*]] ; RV32: [[SCALAR_PH]]: -; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ] ; RV32-NEXT: br label %[[FOR_BODY:.*]] ; RV32: [[FOR_BODY]]: ; @@ -865,14 +811,12 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) { ; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]] ; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; RV64-UF2: [[VECTOR_PH]]: -; RV64-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; RV64-UF2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 -; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]] -; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]] ; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; RV64-UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 1023, [[TMP6]] ; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]] +; RV64-UF2-NEXT: [[TMP33:%.*]] = sub i64 1023, [[TMP7]] ; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]] ; RV64-UF2: [[VECTOR_BODY]]: ; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -911,13 +855,13 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) { ; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP21]]) ; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP32]], align 4 ; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] -; RV64-UF2-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; RV64-UF2-NEXT: br i1 [[TMP33]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; RV64-UF2-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP7]] +; RV64-UF2-NEXT: br i1 [[TMP34]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; RV64-UF2: [[MIDDLE_BLOCK]]: -; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]] +; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[TMP7]] ; RV64-UF2-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] 
; RV64-UF2: [[SCALAR_PH]]: -; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP33]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] ; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]] ; RV64-UF2: [[FOR_BODY]]: ; @@ -984,7 +928,7 @@ define void @vector_reverse_irregular_type(ptr noalias %A, ptr noalias %B) { ; RV64-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1 ; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; RV64-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020 -; RV64-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; RV64-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; RV64: [[MIDDLE_BLOCK]]: ; RV64-NEXT: br label %[[SCALAR_PH]] ; RV64: [[SCALAR_PH]]: @@ -1036,7 +980,7 @@ define void @vector_reverse_irregular_type(ptr noalias %A, ptr noalias %B) { ; RV32-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1 ; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; RV32-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020 -; RV32-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; RV32-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; RV32: [[MIDDLE_BLOCK]]: ; RV32-NEXT: br label %[[SCALAR_PH]] ; RV32: [[SCALAR_PH]]: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll index 3370e92..10d74c0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll @@ -10,36 +10,32 @@ target triple = "riscv64" define void @test(ptr %p) { ; CHECK-LABEL: @test( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 32 +; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]]) ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 200 ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP9]] -; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP10]], align 32 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 
[[TMP5]] -; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]]) +; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP8]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] ; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 ; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200 @@ -47,7 +43,7 @@ define void @test(ptr %p) { ; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -86,7 +82,7 @@ define void @test_may_clobber(ptr %p) { ; CHECK-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP4]], align 32 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: @@ -101,7 +97,7 @@ define void @test_may_clobber(ptr %p) { ; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -127,36 +123,32 @@ exit: define void @trivial_due_max_vscale(ptr %p) { ; CHECK-LABEL: @trivial_due_max_vscale( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 
@llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 32 +; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]]) ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 8192 ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP9]] -; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP10]], align 32 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]]) +; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP8]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 +; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] ; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 ; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192 @@ -164,7 +156,7 @@ define void @trivial_due_max_vscale(ptr %p) { ; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -190,36 +182,32 @@ exit: define void @no_high_lmul_or_interleave(ptr %p) { ; CHECK-LABEL: @no_high_lmul_or_interleave( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: 
[[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 32
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1024
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP9]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP10]], align 32
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024
@@ -227,7 +215,7 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -277,7 +265,7 @@ define void @safe_load_store_distance_not_pow_of_2(i64 %N) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 24)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -291,7 +279,7 @@ define void @safe_load_store_distance_not_pow_of_2(i64 %N) {
; CHECK-NEXT: store i16 0, ptr [[GEP_OFF]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV]], [[N]]
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
index e51f6fa..8bfeac8 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
@@ -8,15 +8,8 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_add(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -24,28 +17,31 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP6]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP8]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -72,15 +68,8 @@ for.end:
define void @vector_add_i32(ptr noalias nocapture %a, i32 %v, i64 %n) {
; CHECK-LABEL: @vector_add_i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[V:%.*]], i64 0
@@ -88,28 +77,31 @@ define void @vector_add_i32(ptr noalias nocapture %a, i32 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP8]], ptr [[TMP6]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP8]], ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[ELEM]], [[V]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -174,15 +166,8 @@ for.end:
define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @indexed_store(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -190,28 +175,31 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> [[TMP8]], i32 8, <vscale x 2 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
; CHECK-NEXT: store i64 [[V]], ptr [[AADDR]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -235,40 +223,37 @@ for.end:
define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @indexed_load(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> [[TMP8]], i32 8, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> poison)
-; CHECK-NEXT: [[TMP9]] = add <vscale x 2 x i64> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.vp.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP13]], <vscale x 2 x i64> [[VEC_PHI]], i32 [[TMP12]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP9]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
@@ -276,7 +261,7 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]]
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]]
@@ -303,15 +288,8 @@ for.end:
define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @splat_int(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -319,24 +297,27 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP6]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -358,15 +339,8 @@ for.end:
define void @splat_ptr(ptr noalias nocapture %a, ptr %v, i64 %n) {
; CHECK-LABEL: @splat_ptr(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[V:%.*]], i64 0
@@ -374,24 +348,27 @@ define void @splat_ptr(ptr noalias nocapture %a, ptr %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x ptr> [[BROADCAST_SPLAT]], ptr [[TMP6]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2p0.p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store ptr [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
index c037b70..c807891 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
@@ -11,12 +11,6 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -76,12 +70,6 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -139,12 +127,6 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -210,12 +192,6 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -267,12 +243,6 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -334,8 +304,6 @@ define i64 @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
@@ -344,7 +312,7 @@ define i64 @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]]
; CHECK-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
@@ -387,12 +355,6 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
index 5c6febc..384cba5d2 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
@@ -5,50 +5,46 @@ define i32 @select_icmp(i32 %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
; CHECK-LABEL: define i32 @select_icmp(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]], ptr readonly captures(none) [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp sge <vscale x 4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP12]], [[X]]
; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[COND_LCSSA]]
@@ -75,50 +71,46 @@ define i32 @select_fcmp(float %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
; CHECK-LABEL: define i32 @select_fcmp(
; CHECK-SAME: float [[X:%.*]], i32 [[Y:%.*]], ptr readonly captures(none) [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[X]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast uge <vscale x 4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast olt float [[TMP12]], [[X]]
; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[COND_LCSSA]]
@@ -145,48 +137,44 @@ define i32 @select_const_i32_from_icmp(ptr nocapture readonly %v, i64 %n) {
; CHECK-LABEL: define i32 @select_const_i32_from_icmp(
; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP21]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP21]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 7, i32 3
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 3, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 3, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 3, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 7
; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA]]
@@ -213,48 +201,44 @@ define i32 @select_i32_from_icmp(ptr nocapture readonly %v, i32 %a, i32 %b, i64
; CHECK-LABEL: define i32 @select_i32_from_icmp(
; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i32 [[A:%.*]], i32 [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP21]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP21]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[B]], i32 [[A]]
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[A]], %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[A]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 [[B]]
; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA]]
@@ -281,48 +265,44 @@ define i32 @select_const_i32_from_fcmp(ptr nocapture readonly %v, i64 %n) {
; CHECK-LABEL: define i32 @select_const_i32_from_fcmp(
; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP21]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast one <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP21]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 1, i32 2
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = fcmp fast ueq float [[TMP15]], 3.000000e+00
; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 1
; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA]]
@@ -386,45 +366,47 @@ define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1
; CHECK-LABEL: define i32 @pred_select_const_i32_from_icmp(
; CHECK-SAME: ptr noalias readonly captures(none) [[SRC1:%.*]], ptr noalias readonly captures(none) [[SRC2:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PREDPHI:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP17]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ult <vscale x 4 x i32> [[TMP18]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP17]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 35)
+; CHECK-NEXT: [[TMP20:%.*]] = select <vscale x 4 x i1> [[TMP19]], <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison)
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> [[TMP7]], i32 [[TMP17]])
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 2)
; CHECK-NEXT: [[TMP10:%.*]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]]
-; CHECK-NEXT: [[PREDPHI]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[PREDPHI1:%.*]] = select <vscale x 4 x i1> [[TMP20]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> [[VEC_PHI]]
+; CHECK-NEXT: [[PREDPHI]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[PREDPHI1]], <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP17]])
+; CHECK-NEXT: [[TMP21:%.*]] = zext i32 [[TMP17]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[PREDPHI]])
; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP13]], i32 1, i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_INC:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_012:%.*]] = phi i32 [ [[R_1:%.*]], %[[FOR_INC]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_INC:.*]] ], [ 0, %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_012:%.*]] = phi i32 [ [[R_1:%.*]], %[[FOR_INC]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[I_013]]
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP14]], 35
@@ -439,7 +421,7 @@ define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1
; CHECK-NEXT: [[R_1]] = phi i32 [ [[R_012]], %[[FOR_BODY]] ], [ [[SPEC_SELECT]], %[[IF_THEN]] ]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_013]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[FOR_END_LOOPEXIT]]:
; CHECK-NEXT: [[R_1_LCSSA:%.*]] = phi i32 [ [[R_1]], %[[FOR_INC]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[R_1_LCSSA]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
index 13a4b16..8c804e5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
@@ -4,28 +4,16 @@ define void @small_trip_count_min_vlen_128(ptr nocapture %a) nounwind vscale_range(4,1024) {
; CHECK-LABEL: @small_trip_count_min_vlen_128(
; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1:%.*]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP1]], align 4
-; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
-; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 [[IV]]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1:%.*]], i32 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[V]], 1
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[IV]], 3
-; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP1]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: br i1 [[COND]], label [[EXIT:%.*]], label [[LOOP1]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -49,28 +37,16 @@ exit:
define void @small_trip_count_min_vlen_32(ptr nocapture %a) nounwind vscale_range(1,1024) {
; CHECK-LABEL: @small_trip_count_min_vlen_32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1:%.*]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP1]], align 4
-; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
-; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 [[IV]]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1:%.*]], i32 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[V]], 1
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[IV]], 3
-; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP1]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[COND]], label [[EXIT:%.*]], label [[LOOP1]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index df907dc..aa90c8b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -6,45 +6,41 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-LABEL: @single_constant_stride_int_scaled(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1
[[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH1:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[TMP5]] ; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 ; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 1) ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]] -; CHECK-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP7]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 +; CHECK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP12]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0 +; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 8) ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[TMP14]] -; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP15]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison) +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) ; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1) -; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP16]], <vscale x 4 x ptr> [[TMP15]], i32 4, <vscale x 4 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP16]], <vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) +; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP11]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP17]], 
label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[SCALAR_PH]] +; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1]] ], [ [[NEXTI:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], 8 ; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]] ; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 @@ -52,7 +48,7 @@ define void @single_constant_stride_int_scaled(ptr %p) { ; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 ; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -77,46 +73,42 @@ exit: define void @single_constant_stride_int_iv(ptr %p) { ; CHECK-LABEL: @single_constant_stride_int_iv( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 64 ; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 64) ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP8]] -; CHECK-NEXT: [[TMP11:%.*]] = mul i64 64, [[TMP5]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[TMP9:%.*]] = mul i64 64, [[TMP11]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0 +; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, 
<vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]] -; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison) +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) ; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1) -; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) +; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP14]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[ENTRY]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]] ; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 ; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 @@ -124,7 +116,7 @@ define void @single_constant_stride_int_iv(ptr %p) { ; CHECK-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], 64 ; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -151,55 +143,46 @@ exit: define void @single_constant_stride_ptr_iv(ptr %p) { ; CHECK-LABEL: @single_constant_stride_ptr_iv( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: 
[[MIN_ITERS_CHECK:%.*]] = icmp ule i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br i1 false, label [[SCALAR_PH1:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[TMP5]] ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 -; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[N_VEC]], 8 -; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP18]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P:%.*]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; CHECK-NEXT: [[TMP16:%.*]] = mul <vscale x 4 x i64> [[TMP14]], splat (i64 8) ; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP16]] -; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x ptr> [[VECTOR_GEP]], i32 0 -; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP17]], align 4 -; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]]) -; CHECK-NEXT: [[TMP19:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0 +; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: [[TMP19:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[VECTOR_GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) ; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[TMP19]], splat (i32 1) -; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] -; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP8]] +; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> align 4 [[VECTOR_GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) +; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP9]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] +; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP11]] to i64 +; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP10]] ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP12]] -; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[SCALAR_PH]] +; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[P]], [[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[P]], [[ENTRY]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1]] ], [ [[NEXTI:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[P]], [[SCALAR_PH1]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4 ; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 ; CHECK-NEXT: store i32 [[Y0]], ptr [[PTR]], align 4 ; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 8 ; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -225,36 +208,31 @@ exit: define void @single_stride_int_scaled(ptr %p, i64 %stride) { ; NOSTRIDED-LABEL: @single_stride_int_scaled( ; NOSTRIDED-NEXT: entry: -; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]]) -; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]] -; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] +; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] ; NOSTRIDED: vector.scevcheck: ; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1 ; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; NOSTRIDED: vector.ph: -; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 -; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]] -; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]] ; NOSTRIDED: vector.body: ; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDEX]] -; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4 +; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) ; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1) -; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], 
ptr [[TMP8]], align 4 -; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] -; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) +; NOSTRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64 +; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] +; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; NOSTRIDED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; NOSTRIDED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; NOSTRIDED: middle.block: -; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; NOSTRIDED-NEXT: br label [[EXIT:%.*]] ; NOSTRIDED: scalar.ph: -; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] +; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] ; NOSTRIDED-NEXT: br label [[LOOP:%.*]] ; NOSTRIDED: loop: ; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] @@ -265,7 +243,7 @@ define void @single_stride_int_scaled(ptr %p, i64 %stride) { ; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 ; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] +; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]] ; NOSTRIDED: exit: ; NOSTRIDED-NEXT: ret void ; @@ -306,37 +284,32 @@ exit: define void @single_stride_int_iv(ptr %p, i64 %stride) { ; NOSTRIDED-LABEL: @single_stride_int_iv( ; NOSTRIDED-NEXT: entry: -; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]]) -; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]] -; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] +; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] ; NOSTRIDED: vector.scevcheck: ; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1 ; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; NOSTRIDED: vector.ph: -; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 -; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]] -; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]] ; NOSTRIDED: vector.body: ; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr 
i32, ptr [[P:%.*]], i64 [[INDEX]] -; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4 +; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) ; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1) -; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP8]], align 4 -; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] -; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) +; NOSTRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64 +; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] +; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; NOSTRIDED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; NOSTRIDED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; NOSTRIDED: middle.block: -; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; NOSTRIDED-NEXT: br label [[EXIT:%.*]] ; NOSTRIDED: scalar.ph: -; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] -; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ] +; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] +; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ] ; NOSTRIDED-NEXT: br label [[LOOP:%.*]] ; NOSTRIDED: loop: ; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] @@ -348,7 +321,7 @@ define void @single_stride_int_iv(ptr %p, i64 %stride) { ; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]] ; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]] +; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]] ; NOSTRIDED: exit: ; NOSTRIDED-NEXT: ret void ; @@ -429,11 +402,7 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) { ; NOSTRIDED-NEXT: entry: ; NOSTRIDED-NEXT: [[P3:%.*]] = ptrtoint ptr [[P:%.*]] to i64 ; NOSTRIDED-NEXT: [[P21:%.*]] = ptrtoint ptr [[P2:%.*]] to i64 -; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) -; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]] -; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] +; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] ; NOSTRIDED: vector.scevcheck: ; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1 ; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]] @@ -445,28 +414,27 @@ define void @double_stride_int_scaled(ptr 
%p, ptr %p2, i64 %stride) { ; NOSTRIDED-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] ; NOSTRIDED-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; NOSTRIDED: vector.ph: -; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; NOSTRIDED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 -; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP8]] -; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; NOSTRIDED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() ; NOSTRIDED-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 ; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]] ; NOSTRIDED: vector.body: ; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; NOSTRIDED-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; NOSTRIDED-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], i64 [[INDEX]] -; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP12]], align 4 +; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]]) ; NOSTRIDED-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1) ; NOSTRIDED-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P2]], i64 [[INDEX]] -; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP15]], align 4 -; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] -; NOSTRIDED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; NOSTRIDED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP14]], ptr align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]]) +; NOSTRIDED-NEXT: [[TMP13:%.*]] = zext i32 [[TMP16]] to i64 +; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] +; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; NOSTRIDED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; NOSTRIDED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; NOSTRIDED: middle.block: -; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; NOSTRIDED-NEXT: br label [[EXIT:%.*]] ; NOSTRIDED: scalar.ph: -; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ] +; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ] ; NOSTRIDED-NEXT: br label [[LOOP:%.*]] ; NOSTRIDED: loop: ; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] @@ -478,17 +446,13 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) { ; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4 ; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]] +; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]] ; NOSTRIDED: exit: ; NOSTRIDED-NEXT: ret void ; ; STRIDED-LABEL: 
@double_stride_int_scaled( ; STRIDED-NEXT: entry: -; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; STRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; STRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 80, i64 [[TMP1]]) -; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]] -; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] +; STRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] ; STRIDED: vector.scevcheck: ; STRIDED-NEXT: [[TMP24:%.*]] = shl i64 [[STRIDE:%.*]], 2 ; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], -4 @@ -537,10 +501,6 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; STRIDED: vector.ph: -; STRIDED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; STRIDED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 -; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]] -; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; STRIDED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() ; STRIDED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4 ; STRIDED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0 @@ -548,28 +508,32 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; STRIDED-NEXT: [[TMP14:%.*]] = mul <vscale x 4 x i64> [[TMP12]], splat (i64 1) ; STRIDED-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP14]] -; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 1, [[TMP11]] -; STRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP17]], i64 0 -; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]] ; STRIDED: vector.body: ; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; STRIDED-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; STRIDED-NEXT: [[TMP43:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; STRIDED-NEXT: [[TMP44:%.*]] = zext i32 [[TMP43]] to i64 +; STRIDED-NEXT: [[TMP45:%.*]] = mul i64 1, [[TMP44]] +; STRIDED-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP45]], i64 0 +; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; STRIDED-NEXT: [[TMP18:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT1]] ; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP18]] -; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META8:![0-9]+]] +; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 
true), i32 [[TMP43]]), !alias.scope [[META9:![0-9]+]] ; STRIDED-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1) ; STRIDED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP18]] -; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[TMP21]], i32 4, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META11:![0-9]+]], !noalias [[META8]] -; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]] +; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META12:![0-9]+]], !noalias [[META9]] +; STRIDED-NEXT: [[TMP46:%.*]] = zext i32 [[TMP43]] to i64 +; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP46]], [[INDEX]] +; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP46]] ; STRIDED-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; STRIDED-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; STRIDED-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; STRIDED-NEXT: [[TMP41:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; STRIDED-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; STRIDED: middle.block: -; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; STRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; STRIDED-NEXT: br label [[EXIT:%.*]] ; STRIDED: scalar.ph: -; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[VECTOR_MEMCHECK1]] ] +; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK1]] ] ; STRIDED-NEXT: br label [[LOOP:%.*]] ; STRIDED: loop: ; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] @@ -581,7 +545,7 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4 ; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]] +; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]] ; STRIDED: exit: ; STRIDED-NEXT: ret void ; @@ -607,37 +571,32 @@ exit: define void @double_stride_int_iv(ptr %p, ptr %p2, i64 %stride) { ; NOSTRIDED-LABEL: @double_stride_int_iv( ; NOSTRIDED-NEXT: entry: -; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]]) -; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]] -; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] +; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] ; NOSTRIDED: vector.scevcheck: ; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1 ; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; NOSTRIDED: vector.ph: -; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 -; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 
1024, [[TMP4]] -; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]] ; NOSTRIDED: vector.body: ; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDEX]] -; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4 +; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) ; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1) -; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP8]], align 4 -; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] -; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) +; NOSTRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64 +; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] +; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; NOSTRIDED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; NOSTRIDED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; NOSTRIDED: middle.block: -; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; NOSTRIDED-NEXT: br label [[EXIT:%.*]] ; NOSTRIDED: scalar.ph: -; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] -; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ] +; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] +; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ] ; NOSTRIDED-NEXT: br label [[LOOP:%.*]] ; NOSTRIDED: loop: ; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] @@ -650,7 +609,7 @@ define void @double_stride_int_iv(ptr %p, ptr %p2, i64 %stride) { ; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]] ; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]] +; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]] ; NOSTRIDED: exit: ; NOSTRIDED-NEXT: ret void ; @@ -692,7 +651,6 @@ exit: ret void } - define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) { ; NOSTRIDED-LABEL: @double_stride_ptr_iv( ; NOSTRIDED-NEXT: entry: @@ -714,11 +672,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) { ; ; STRIDED-LABEL: @double_stride_ptr_iv( ; STRIDED-NEXT: 
entry: -; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; STRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; STRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) -; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]] -; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] +; STRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] ; STRIDED: vector.memcheck: ; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[STRIDE:%.*]], 1023 ; STRIDED-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P2:%.*]], i64 [[TMP3]] @@ -738,21 +692,14 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; STRIDED: vector.ph: -; STRIDED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; STRIDED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 -; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]] -; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; STRIDED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() ; STRIDED-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4 -; STRIDED-NEXT: [[TMP10:%.*]] = mul i64 [[N_VEC]], [[STRIDE]] -; STRIDED-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP10]] -; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 [[N_VEC]], [[STRIDE]] -; STRIDED-NEXT: [[IND_END7:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP11]] ; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]] ; STRIDED: vector.body: ; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] ; STRIDED-NEXT: [[POINTER_PHI11:%.*]] = phi ptr [ [[P2]], [[VECTOR_PH]] ], [ [[PTR_IND12:%.*]], [[VECTOR_BODY]] ] +; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; STRIDED-NEXT: [[TMP19:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; STRIDED-NEXT: [[DOTSPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0 ; STRIDED-NEXT: [[DOTSPLAT10:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer @@ -761,23 +708,27 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-NEXT: [[TMP27:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; STRIDED-NEXT: [[TMP21:%.*]] = mul <vscale x 4 x i64> [[TMP27]], [[DOTSPLAT10]] ; STRIDED-NEXT: [[VECTOR_GEP7:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP21]] -; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[VECTOR_GEP7]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META15:![0-9]+]] +; STRIDED-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[VECTOR_GEP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]), !alias.scope [[META16:![0-9]+]] ; STRIDED-NEXT: [[TMP30:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1) -; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP30]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> 
splat (i1 true)), !alias.scope [[META18:![0-9]+]], !noalias [[META15]] -; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]] -; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP13]] +; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP30]], <vscale x 4 x ptr> align 4 [[VECTOR_GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]), !alias.scope [[META19:![0-9]+]], !noalias [[META16]] +; STRIDED-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 +; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]] +; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; STRIDED-NEXT: [[TMP20:%.*]] = zext i32 [[TMP14]] to i64 +; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP20]] ; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP25]] -; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP13]] +; STRIDED-NEXT: [[TMP22:%.*]] = zext i32 [[TMP14]] to i64 +; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP22]] ; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP17]] -; STRIDED-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; STRIDED-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; STRIDED-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; STRIDED-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; STRIDED: middle.block: -; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; STRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; STRIDED-NEXT: br label [[EXIT:%.*]] ; STRIDED: scalar.ph: -; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] -; STRIDED-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[P]], [[ENTRY]] ], [ [[P]], [[VECTOR_MEMCHECK]] ] -; STRIDED-NEXT: [[BC_RESUME_VAL8:%.*]] = phi ptr [ [[IND_END7]], [[MIDDLE_BLOCK]] ], [ [[P2]], [[ENTRY]] ], [ [[P2]], [[VECTOR_MEMCHECK]] ] +; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] +; STRIDED-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[P]], [[ENTRY]] ], [ [[P]], [[VECTOR_MEMCHECK]] ] +; STRIDED-NEXT: [[BC_RESUME_VAL8:%.*]] = phi ptr [ [[P2]], [[ENTRY]] ], [ [[P2]], [[VECTOR_MEMCHECK]] ] ; STRIDED-NEXT: br label [[LOOP:%.*]] ; STRIDED: loop: ; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ] @@ -790,7 +741,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-NEXT: [[PTR2_NEXT]] = getelementptr inbounds i8, ptr [[PTR2]], i64 [[STRIDE]] ; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1 ; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]] +; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP22:![0-9]+]] ; STRIDED: exit: ; STRIDED-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll index 38e7832..f539ccf 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll @@ -22,12 +22,6 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) { ; IF-EVL-NEXT: 
[[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -84,8 +78,6 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -94,7 +86,7 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = and <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -148,12 +140,6 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -210,8 +196,6 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -220,7 +204,7 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = or <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -274,12 +258,6 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -336,8 +314,6 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -346,7 +322,7 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = xor <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -400,12 +376,6 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -462,8 +432,6 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -472,7 +440,7 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = shl <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -526,12 +494,6 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -588,8 +550,6 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -598,7 +558,7 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = lshr <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -652,12 +612,6 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -714,8 +668,6 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -724,7 +676,7 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = ashr <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -778,12 +730,6 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -840,8 +786,6 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -850,7 +794,7 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -904,12 +848,6 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -966,8 +904,6 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -976,7 +912,7 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = sub <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1030,12 +966,6 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1092,8 +1022,6 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1102,7 +1030,7 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = mul <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1156,12 +1084,6 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1218,8 +1140,6 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1228,7 +1148,7 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = sdiv <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1282,12 +1202,6 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1344,8 +1258,6 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1354,7 +1266,7 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = udiv <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1408,12 +1320,6 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1470,8 +1376,6 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1480,7 +1384,7 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = srem <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1534,12 +1438,6 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1596,8 +1494,6 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1606,7 +1502,7 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = urem <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1663,12 +1559,6 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1726,8 +1616,6 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1736,7 +1624,7 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP12:%.*]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1791,12 +1679,6 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1854,8 +1736,6 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1864,7 +1744,7 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP12:%.*]] = fsub fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1919,12 +1799,6 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1982,8 +1856,6 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1992,7 +1864,7 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP12:%.*]] = fmul fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -2047,12 +1919,6 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -2110,8 +1976,6 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -2120,7 +1984,7 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP12:%.*]] = fdiv fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -2228,12 +2092,6 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -2291,8 +2149,6 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -2301,7 +2157,7 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP12:%.*]] = fneg fast <vscale x 4 x float> [[WIDE_LOAD]]
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
index f604745..d2f3355 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
@@ -27,12 +27,6 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; IF-EVL-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP28]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP28]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -99,8 +93,6 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -111,7 +103,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -176,12 +168,6 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; IF-EVL-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP28]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP28]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -248,8 +234,6 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -260,7 +244,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -325,12 +309,6 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; IF-EVL-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP28]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP28]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -397,8 +375,6 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -409,7 +385,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -474,12 +450,6 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; IF-EVL-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP28]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP28]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -546,8 +516,6 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -558,7 +526,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -619,12 +587,6 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP21]], [[TMP20]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP23:%.*]] = mul nuw i64 [[TMP22]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP23]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP23]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -682,8 +644,6 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -692,7 +652,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -748,12 +708,6 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP7]], [[TMP6]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 [[TMP9]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP10]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP9]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -811,8 +765,6 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -821,7 +773,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.cttz.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -877,12 +829,6 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP24]], [[TMP23]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP26:%.*]] = mul nuw i64 [[TMP25]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP26]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP26]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -944,8 +890,6 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -956,7 +900,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP14:%.*]] = trunc <vscale x 4 x i64> [[TMP13]] to <vscale x 4 x i32>
; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP15]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1016,12 +960,6 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP24]], [[TMP23]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP26:%.*]] = mul nuw i64 [[TMP25]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP26]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP26]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1083,8 +1021,6 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1095,7 +1031,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP14:%.*]] = trunc <vscale x 4 x i64> [[TMP13]] to <vscale x 4 x i32>
; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP15]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1155,12 +1091,6 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP21]], [[TMP20]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP23:%.*]] = mul nuw i64 [[TMP22]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP23]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP23]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1218,8 +1148,6 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1228,7 +1156,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
index 2be74e5..6db81b3 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
@@ -22,12 +22,6 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP9]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -85,8 +79,6 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -95,7 +87,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = sext <vscale x 2 x i32> [[WIDE_LOAD]] to <vscale x 2 x i64>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -150,12 +142,6 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP9]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -213,8 +199,6 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -223,7 +207,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = zext <vscale x 2 x i32> [[WIDE_LOAD]] to <vscale x 2 x i64>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META12:![0-9]+]], !noalias [[META9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -278,12 +262,6 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP9]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -341,8 +319,6 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -351,7 +327,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = trunc <vscale x 2 x i64> [[WIDE_LOAD]] to <vscale x 2 x i32>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x i32> [[TMP10]], ptr [[TMP11]], align 4, !alias.scope [[META19:![0-9]+]], !noalias [[META16]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -406,12 +382,6 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP9]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -469,8 +439,6 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -479,7 +447,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = fpext <vscale x 2 x float> [[WIDE_LOAD]] to <vscale x 2 x double>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x double> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META26:![0-9]+]], !noalias [[META23]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -534,12 +502,6 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP9]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -597,8 +559,6 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -607,7 +567,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = fptrunc <vscale x 2 x double> [[WIDE_LOAD]] to <vscale x 2 x float>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x float> [[TMP10]], ptr [[TMP11]], align 4, !alias.scope [[META33:![0-9]+]], !noalias [[META30]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -662,12 +622,6 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -725,8 +679,6 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -735,7 +687,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = sitofp <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x float>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -790,12 +742,6 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -853,8 +799,6 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -863,7 +807,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = uitofp <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x float>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -918,12 +862,6 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -981,8 +919,6 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -991,7 +927,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = fptosi <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x i32>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1046,12 +982,6 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1109,8 +1039,6 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1119,7 +1047,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = fptoui <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x i32>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1174,12 +1102,6 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1237,8 +1159,6 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1247,7 +1167,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = inttoptr <vscale x 2 x i64> [[WIDE_LOAD]] to <vscale x 2 x ptr>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x ptr> [[TMP12]], ptr [[TMP13]], align 8
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1293,12 +1213,6 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[ENTRY:.*]]:
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; IF-EVL-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64>
@llvm.stepvector.nxv2i64() @@ -1353,12 +1267,10 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1) ; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]] -; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]] +; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP3]] ; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0 ; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] @@ -1369,7 +1281,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) { ; NO-VP-NEXT: [[TMP10:%.*]] = ptrtoint <vscale x 2 x ptr> [[TMP9]] to <vscale x 2 x i64> ; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] ; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll index 76a830a..0c67e47 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll @@ -23,12 +23,6 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: entry: ; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-OUTLOOP: vector.ph: -; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]] -; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 @@ -76,12 +70,6 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: entry: ; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-INLOOP: vector.ph: -; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-INLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 -; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1 -; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]] -; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]] -; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], 
[[N_MOD_VF]] ; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 ; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] @@ -134,8 +122,6 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) { ; NO-VP-OUTLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 ; NO-VP-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP7]] ; NO-VP-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-OUTLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-OUTLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 ; NO-VP-OUTLOOP-NEXT: [[TMP11:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 ; NO-VP-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP-OUTLOOP: vector.body: @@ -146,7 +132,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) { ; NO-VP-OUTLOOP-NEXT: [[TMP21:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 3) ; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP21]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i32> zeroinitializer ; NO-VP-OUTLOOP-NEXT: [[TMP17]] = add <vscale x 4 x i32> [[TMP16]], [[VEC_PHI]] -; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] +; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; NO-VP-OUTLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-OUTLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP-OUTLOOP: middle.block: @@ -184,8 +170,6 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) { ; NO-VP-INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP-INLOOP: vector.body: ; NO-VP-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -196,7 +180,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) { ; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer ; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]]) ; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]] -; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP-INLOOP: middle.block: @@ -246,12 +230,6 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: entry: ; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-OUTLOOP: vector.ph: -; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]] -; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 
@llvm.vscale.i64() ; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 @@ -308,12 +286,6 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: entry: ; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-INLOOP: vector.ph: -; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-INLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 -; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1 -; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]] -; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]] -; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 ; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] @@ -369,8 +341,6 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) { ; NO-VP-OUTLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 ; NO-VP-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP7]] ; NO-VP-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-OUTLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-OUTLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 ; NO-VP-OUTLOOP-NEXT: [[TMP11:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 ; NO-VP-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP-OUTLOOP: vector.body: @@ -381,7 +351,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) { ; NO-VP-OUTLOOP-NEXT: [[TMP18:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 3) ; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_LOAD]] ; NO-VP-OUTLOOP-NEXT: [[PREDPHI]] = select <vscale x 4 x i1> [[TMP18]], <vscale x 4 x i32> [[TMP16]], <vscale x 4 x i32> [[VEC_PHI]] -; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] +; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; NO-VP-OUTLOOP-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-OUTLOOP-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; NO-VP-OUTLOOP: middle.block: @@ -423,8 +393,6 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) { ; NO-VP-INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP-INLOOP: vector.body: ; NO-VP-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -435,7 +403,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) { ; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer ; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]]) ; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]] -; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; 
NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; NO-VP-INLOOP: middle.block: @@ -495,12 +463,6 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: entry: ; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-OUTLOOP: vector.ph: -; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]] -; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 @@ -557,12 +519,6 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: entry: ; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-INLOOP: vector.ph: -; IF-EVL-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]] -; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-INLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() @@ -624,13 +580,11 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; NO-VP-OUTLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 ; NO-VP-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP7]] ; NO-VP-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-OUTLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-OUTLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 ; NO-VP-OUTLOOP-NEXT: [[TMP11:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 ; NO-VP-OUTLOOP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() ; NO-VP-OUTLOOP-NEXT: [[TMP14:%.*]] = mul <vscale x 4 x i32> [[TMP12]], splat (i32 1) ; NO-VP-OUTLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP14]] -; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32 +; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP7]] to i32 ; NO-VP-OUTLOOP-NEXT: [[TMP17:%.*]] = mul i32 1, [[TMP16]] ; NO-VP-OUTLOOP-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP17]], i64 0 ; NO-VP-OUTLOOP-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer @@ -644,7 +598,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; NO-VP-OUTLOOP-NEXT: [[TMP27:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], [[VEC_IND]] ; NO-VP-OUTLOOP-NEXT: [[TMP22:%.*]] = select <vscale x 4 x i1> [[TMP27]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i32> zeroinitializer ; NO-VP-OUTLOOP-NEXT: [[TMP23]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI]] -; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] +; 
NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; NO-VP-OUTLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]] ; NO-VP-OUTLOOP-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-OUTLOOP-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] @@ -684,12 +638,10 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; NO-VP-INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-INLOOP-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() ; NO-VP-INLOOP-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i32> [[TMP6]], splat (i32 1) ; NO-VP-INLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP8]] -; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i32 +; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP3]] to i32 ; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = mul i32 1, [[TMP10]] ; NO-VP-INLOOP-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0 ; NO-VP-INLOOP-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer @@ -704,7 +656,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; NO-VP-INLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer ; NO-VP-INLOOP-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]]) ; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[VEC_PHI]], [[TMP17]] -; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]] ; NO-VP-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-INLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] @@ -757,12 +709,6 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: entry: ; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-OUTLOOP: vector.ph: -; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]] -; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 @@ -828,12 +774,6 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: entry: ; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL-INLOOP: vector.ph: -; IF-EVL-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; 
IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]] -; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-INLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() @@ -898,13 +838,11 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; NO-VP-OUTLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 ; NO-VP-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP7]] ; NO-VP-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-OUTLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-OUTLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 ; NO-VP-OUTLOOP-NEXT: [[TMP11:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 ; NO-VP-OUTLOOP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() ; NO-VP-OUTLOOP-NEXT: [[TMP14:%.*]] = mul <vscale x 4 x i32> [[TMP12]], splat (i32 1) ; NO-VP-OUTLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP14]] -; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32 +; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP7]] to i32 ; NO-VP-OUTLOOP-NEXT: [[TMP17:%.*]] = mul i32 1, [[TMP16]] ; NO-VP-OUTLOOP-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP17]], i64 0 ; NO-VP-OUTLOOP-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer @@ -918,7 +856,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; NO-VP-OUTLOOP-NEXT: [[TMP28:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], [[VEC_IND]] ; NO-VP-OUTLOOP-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_LOAD]] ; NO-VP-OUTLOOP-NEXT: [[PREDPHI]] = select <vscale x 4 x i1> [[TMP28]], <vscale x 4 x i32> [[TMP22]], <vscale x 4 x i32> [[VEC_PHI]] -; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] +; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; NO-VP-OUTLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]] ; NO-VP-OUTLOOP-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-OUTLOOP-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] @@ -962,12 +900,10 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; NO-VP-INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-INLOOP-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() ; NO-VP-INLOOP-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i32> [[TMP6]], splat (i32 1) ; NO-VP-INLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP8]] -; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i32 +; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP3]] to i32 ; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = mul i32 1, [[TMP10]] ; NO-VP-INLOOP-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0 ; NO-VP-INLOOP-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale 
x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer @@ -982,7 +918,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; NO-VP-INLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer ; NO-VP-INLOOP-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]]) ; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[VEC_PHI]], [[TMP17]] -; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]] ; NO-VP-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-INLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll index a216aa8..0d1d9a9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll @@ -13,12 +13,6 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]: ; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; IF-EVL: [[VECTOR_PH]]: -; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]] @@ -71,8 +65,6 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] ; NO-VP: [[VECTOR_BODY]]: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -83,7 +75,7 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: [[TMP8:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] ; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP: [[MIDDLE_BLOCK]]: @@ -134,12 +126,6 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]: ; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; IF-EVL: [[VECTOR_PH]]: -; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add 
i64 1024, [[TMP2]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]] @@ -192,8 +178,6 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] ; NO-VP: [[VECTOR_BODY]]: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -204,7 +188,7 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: [[TMP8:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] ; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; NO-VP: [[MIDDLE_BLOCK]]: @@ -254,12 +238,6 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]: ; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; IF-EVL: [[VECTOR_PH]]: -; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]] @@ -312,8 +290,6 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] ; NO-VP: [[VECTOR_BODY]]: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -324,7 +300,7 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: [[TMP8:%.*]] = srem <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] ; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; NO-VP: [[MIDDLE_BLOCK]]: @@ -374,12 +350,6 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; 
IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]: ; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; IF-EVL: [[VECTOR_PH]]: -; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]] @@ -432,8 +402,6 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] ; NO-VP: [[VECTOR_BODY]]: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -444,7 +412,7 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: [[TMP8:%.*]] = urem <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] ; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; NO-VP: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll index f92bf5a..76afbd45d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll @@ -15,12 +15,6 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[ENTRY:.*]]: ; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; IF-EVL: [[VECTOR_PH]]: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP8]] to i32 @@ -78,8 +72,6 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() ; NO-VP-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4 ; NO-VP-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1 @@ -94,7 +86,7 @@ define void 
@first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; NO-VP-NEXT: [[TMP13:%.*]] = add nsw <vscale x 4 x i32> [[TMP12]], [[WIDE_LOAD]] ; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP13]], ptr [[TMP14]], align 4 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP: [[MIDDLE_BLOCK]]: @@ -147,12 +139,6 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[ENTRY:.*]]: ; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; IF-EVL: [[VECTOR_PH]]: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: [[TMP32:%.*]] = trunc i64 [[TMP8]] to i32 @@ -218,8 +204,6 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() ; NO-VP-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4 ; NO-VP-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1 @@ -240,7 +224,7 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; NO-VP-NEXT: [[TMP17:%.*]] = add nsw <vscale x 4 x i32> [[TMP15]], [[TMP16]] ; NO-VP-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; NO-VP: [[MIDDLE_BLOCK]]: @@ -300,12 +284,6 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[ENTRY:.*]]: ; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; IF-EVL: [[VECTOR_PH]]: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: [[TMP39:%.*]] = trunc i64 [[TMP8]] to i32 @@ -381,8 +359,6 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: 
[[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() ; NO-VP-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4 ; NO-VP-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1 @@ -410,7 +386,7 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; NO-VP-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i32> [[TMP21]], [[TMP18]] ; NO-VP-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP22]], ptr [[TMP23]], align 4 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; NO-VP: [[MIDDLE_BLOCK]]: @@ -486,8 +462,6 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]] ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4 ; IF-EVL-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1 @@ -502,7 +476,7 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[TMP11:%.*]] = add nsw <vscale x 4 x i32> [[TMP10]], [[WIDE_LOAD]] ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]] ; IF-EVL-NEXT: store <vscale x 4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP5]] +; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP3]] ; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: @@ -547,8 +521,6 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() ; NO-VP-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4 ; NO-VP-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1 @@ -563,7 +535,7 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; NO-VP-NEXT: [[TMP13:%.*]] = add nsw <vscale x 4 x i32> [[TMP12]], [[WIDE_LOAD]] ; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP13]], ptr [[TMP14]], align 4 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; NO-VP: [[MIDDLE_BLOCK]]: @@ -621,12 +593,6 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) { ; 
IF-EVL-NEXT: [[ENTRY:.*]]: ; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; IF-EVL: [[VECTOR_PH]]: -; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC]], [[TMP2]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP18]], 2 ; IF-EVL-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32 @@ -689,12 +655,10 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP4]], 2 ; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1) ; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]] -; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP9]] +; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP3]] ; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP10]], i64 0 ; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; NO-VP-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() @@ -710,7 +674,7 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) { ; NO-VP-NEXT: [[TMP13:%.*]] = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> [[VECTOR_RECUR]], <vscale x 2 x i64> [[TMP12]], i32 -1) ; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP13]], ptr [[TMP11]], align 8 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] ; NO-VP-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll index da5aed9..47339e0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll @@ -12,12 +12,6 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 ; 
IF-EVL-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() @@ -27,7 +21,7 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 ; IF-EVL-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP12]] @@ -75,12 +69,10 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1) ; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]] -; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]] +; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP3]] ; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0 ; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] @@ -93,7 +85,7 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde ; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> [[TMP10]], i32 4, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x float> poison) ; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_GATHER]] ; NO-VP-NEXT: call void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> [[WIDE_MASKED_GATHER2]], <vscale x 2 x ptr> [[TMP11]], i32 4, <vscale x 2 x i1> splat (i1 true)) -; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw i64 [[INDVARS_IV]], [[TMP5]] +; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw i64 [[INDVARS_IV]], [[TMP3]] ; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] ; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll index 433d1e4..f425010 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll @@ -14,19 +14,13 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; 
IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -67,8 +61,6 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -77,7 +69,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 ; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) ; NO-VP-NEXT: [[TMP10]] = add i32 [[VEC_PHI]], [[TMP9]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP: middle.block: @@ -229,19 +221,13 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = 
call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -282,8 +268,6 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -292,7 +276,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 ; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) ; NO-VP-NEXT: [[TMP10]] = or i32 [[VEC_PHI]], [[TMP9]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; NO-VP: middle.block: @@ -337,19 +321,13 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -390,8 +368,6 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -400,7 +376,7 @@ define i32 @and(ptr %a, 
i64 %n, i32 %start) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 ; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) ; NO-VP-NEXT: [[TMP10]] = and i32 [[VEC_PHI]], [[TMP9]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; NO-VP: middle.block: @@ -445,19 +421,13 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -498,8 +468,6 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -508,7 +476,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 ; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) ; NO-VP-NEXT: [[TMP10]] = xor i32 [[VEC_PHI]], [[TMP9]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; NO-VP: middle.block: @@ -553,19 +521,13 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw 
i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -607,8 +569,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -617,7 +577,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 ; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) ; NO-VP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP9]], i32 [[VEC_PHI]]) -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; NO-VP: middle.block: @@ -664,19 +624,13 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ 
[[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -718,8 +672,6 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -728,7 +680,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 ; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) ; NO-VP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smax.i32(i32 [[TMP9]], i32 [[VEC_PHI]]) -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; NO-VP: middle.block: @@ -775,19 +727,13 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -829,8 +775,6 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = 
phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -839,7 +783,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 ; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) ; NO-VP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umin.i32(i32 [[TMP9]], i32 [[VEC_PHI]]) -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; NO-VP: middle.block: @@ -886,19 +830,13 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -940,8 +878,6 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -950,7 +886,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 ; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) ; NO-VP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umax.i32(i32 [[TMP9]], i32 [[VEC_PHI]]) -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; NO-VP: middle.block: @@ -997,19 +933,13 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: entry: ; 
IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -1050,8 +980,6 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -1060,7 +988,7 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4 ; NO-VP-NEXT: [[TMP9:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[WIDE_LOAD]]) ; NO-VP-NEXT: [[TMP10]] = fadd reassoc float [[VEC_PHI]], [[TMP9]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; NO-VP: middle.block: @@ -1212,19 +1140,13 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ 
[[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -1267,8 +1189,6 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -1278,7 +1198,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; NO-VP-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]]) ; NO-VP-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast olt float [[TMP9]], [[VEC_PHI]] ; NO-VP-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP9]], float [[VEC_PHI]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; NO-VP: middle.block: @@ -1325,19 +1245,13 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -1380,8 +1294,6 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 
[[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -1391,7 +1303,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; NO-VP-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]]) ; NO-VP-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast ogt float [[TMP9]], [[VEC_PHI]] ; NO-VP-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP9]], float [[VEC_PHI]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; NO-VP: middle.block: @@ -1654,19 +1566,13 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]]) @@ -1712,8 +1618,6 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -1725,7 +1629,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; NO-VP-NEXT: [[TMP11:%.*]] = fmul reassoc <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]] ; NO-VP-NEXT: [[TMP12:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP11]]) ; NO-VP-NEXT: [[TMP13]] = fadd reassoc float 
[[VEC_PHI]], [[TMP12]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] ; NO-VP: middle.block: @@ -1774,19 +1678,13 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -1831,8 +1729,6 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -1841,7 +1737,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 ; NO-VP-NEXT: [[TMP9:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3) ; NO-VP-NEXT: [[TMP10]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] ; NO-VP: middle.block: @@ -1891,19 +1787,13 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: 
[[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) @@ -1948,8 +1838,6 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -1958,7 +1846,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4 ; NO-VP-NEXT: [[TMP9:%.*]] = fcmp fast olt <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00) ; NO-VP-NEXT: [[TMP10]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] ; NO-VP: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll index c5d2739..8198409 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll @@ -13,18 +13,12 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP2]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], 
[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0 ; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer @@ -75,8 +69,6 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -88,7 +80,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) { ; NO-VP-NEXT: [[TMP21:%.*]] = add nsw <vscale x 4 x i32> [[TMP19]], [[TMP18]] ; NO-VP-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP21]], ptr [[TMP22]], align 4 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll index be6ae1d..84ea3b9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll @@ -31,12 +31,6 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr) ; IF-EVL-OUTLOOP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; IF-EVL-OUTLOOP-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[ENTRY:%.*]] ; IF-EVL-OUTLOOP: vector.ph: -; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 -; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 -; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP7]] -; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]] -; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 ; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 @@ -89,12 +83,6 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr) ; IF-EVL-INLOOP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; IF-EVL-INLOOP-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; IF-EVL-INLOOP: vector.ph: -; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 -; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = sub i64 [[TMP9]], 1 -; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = 
add i64 [[N]], [[TMP10]] -; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP9]] -; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 ; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] @@ -152,8 +140,6 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr) ; NO-VP-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]] ; NO-VP-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-OUTLOOP-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-OUTLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 ; NO-VP-OUTLOOP-NEXT: [[TMP8:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 ; NO-VP-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP-OUTLOOP: vector.body: @@ -162,7 +148,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr) ; NO-VP-OUTLOOP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]] ; NO-VP-OUTLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP10]], align 4, !alias.scope [[META0:![0-9]+]] ; NO-VP-OUTLOOP-NEXT: [[TMP12]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]] -; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; NO-VP-OUTLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-OUTLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; NO-VP-OUTLOOP: middle.block: @@ -207,8 +193,6 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr) ; NO-VP-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]] ; NO-VP-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-INLOOP-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-INLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 ; NO-VP-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP-INLOOP: vector.body: ; NO-VP-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -217,7 +201,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr) ; NO-VP-INLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4, !alias.scope [[META0:![0-9]+]] ; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) ; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]] -; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; NO-VP-INLOOP: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll index 62a4f73..acfad66 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll @@ -12,18 +12,12 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label 
[[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP8:%.*]] = sub i32 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i32 [[N:%.*]], [[TMP8]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i32 [[TMP9]], 4 ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[TMP11:%.*]] = phi i32 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[TMP11:%.*]] = phi i32 [ [[N:%.*]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[TMP11]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]]) @@ -61,8 +55,6 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) { ; NO-VP-NEXT: [[TMP11:%.*]] = mul nuw i32 [[TMP1]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], [[TMP11]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() -; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i32 [[TMP2]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -70,7 +62,7 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) { ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 4 ; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]] ; NO-VP-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD]], ptr [[TMP6]], align 4 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP12]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP11]] ; NO-VP-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll index 296405d..60e0aab 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll @@ -16,12 +16,6 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) { ; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TC]], i64 1) ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 -; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP6]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 ; CHECK-NEXT: br label 
%[[VECTOR_BODY:.*]] @@ -85,12 +79,6 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) { ; CHECK: [[LOOP_PREHEADER]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 -; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC]], [[TMP6]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] @@ -154,12 +142,6 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) { ; CHECK: [[LOOP_PREHEADER]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC_ADD]], [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll index e06bbe9..7a39502 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll @@ -12,18 +12,12 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]]) @@ -72,8 +66,6 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[INC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = 
call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] @@ -84,7 +76,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) { ; NO-VP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison) ; NO-VP-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[WIDE_MASKED_LOAD]] ; NO-VP-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP9]], ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]]) -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[INC]] ; NO-VP-NEXT: br i1 [[TMP10]], label [[FOR_INC:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll index 775d9ca..8142154 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll @@ -14,19 +14,13 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]]) @@ -66,8 +60,6 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N1]], [[TMP3]] ; NO-VP-NEXT: [[N:%.*]] = sub i64 [[N1]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] @@ -75,7 +67,7 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) { ; NO-VP-NEXT: 
[[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[ARRAYIDX]], align 4 ; NO-VP-NEXT: [[ADD]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[SUM_07]], <vscale x 4 x float> [[WIDE_LOAD]]) -; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]] +; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]] ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] ; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll index 464667d..afbed37a 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll @@ -12,12 +12,6 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0 @@ -25,7 +19,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]]) @@ -67,8 +61,6 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: @@ -77,7 +69,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4 ; NO-VP-NEXT: [[TMP10]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: 
[[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP: middle.block: @@ -230,12 +222,6 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0 @@ -243,7 +229,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]]) @@ -285,8 +271,6 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: @@ -295,7 +279,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4 ; NO-VP-NEXT: [[TMP10]] = or <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; NO-VP: middle.block: @@ -341,12 +325,6 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; 
IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> splat (i32 -1), i32 [[START:%.*]], i32 0 @@ -354,7 +332,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]]) @@ -396,8 +374,6 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x i32> splat (i32 -1), i32 [[START:%.*]], i32 0 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: @@ -406,7 +382,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4 ; NO-VP-NEXT: [[TMP10]] = and <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]] -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; NO-VP: middle.block: @@ -452,12 +428,6 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0 @@ -465,7 +435,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] 
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -507,8 +477,6 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
@@ -517,7 +485,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; NO-VP-NEXT: [[TMP10]] = xor <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; NO-VP: middle.block:
@@ -563,12 +531,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
@@ -577,7 +539,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -621,8 +583,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -633,7 +593,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; NO-VP: middle.block:
@@ -681,12 +641,6 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
@@ -695,7 +649,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -739,8 +693,6 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -751,7 +703,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NO-VP: middle.block:
@@ -799,12 +751,6 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
@@ -813,7 +759,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -857,8 +803,6 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -869,7 +813,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = icmp ult <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; NO-VP: middle.block:
@@ -917,12 +861,6 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
@@ -931,7 +869,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -975,8 +913,6 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -987,7 +923,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = icmp ugt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; NO-VP: middle.block:
@@ -1035,12 +971,6 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[START:%.*]], i32 0
@@ -1048,7 +978,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -1090,8 +1020,6 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[START:%.*]], i32 0
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
@@ -1100,7 +1028,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP8]], align 4
; NO-VP-NEXT: [[TMP10]] = fadd reassoc <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; NO-VP: middle.block:
@@ -1253,12 +1181,6 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[START:%.*]], i64 0
@@ -1267,7 +1189,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1311,8 +1233,6 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -1323,7 +1243,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = fcmp fast olt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; NO-VP: middle.block:
@@ -1371,12 +1291,6 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[START:%.*]], i64 0
@@ -1385,7 +1299,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1429,8 +1343,6 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -1441,7 +1353,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = fcmp fast ogt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; NO-VP: middle.block:
@@ -1705,12 +1617,6 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[START:%.*]], i32 0
@@ -1718,7 +1624,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -1764,8 +1670,6 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[START:%.*]], i32 0
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
@@ -1776,7 +1680,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP10]], align 4
; NO-VP-NEXT: [[TMP12]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD1]], <vscale x 4 x float> [[VEC_PHI]])
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
; NO-VP: middle.block:
@@ -1826,19 +1730,13 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1883,8 +1781,6 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1893,7 +1789,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
; NO-VP-NEXT: [[TMP10]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
; NO-VP: middle.block:
@@ -1943,19 +1839,13 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -2000,8 +1890,6 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -2010,7 +1898,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = fcmp fast olt <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; NO-VP-NEXT: [[TMP10]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; NO-VP: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
index 397cb95..3f378c7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
@@ -12,12 +12,6 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -81,8 +75,6 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[STARTVAL:%.*]], [[N_VEC]]
; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
@@ -91,22 +83,22 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL]], [[INDEX]]
; NO-VP-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], -1
; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[TMP8]]
-; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]]
-; NO-VP-NEXT: [[TMP11:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP3]]
+; NO-VP-NEXT: [[TMP11:%.*]] = sub i64 [[TMP3]], 1
; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP11]]
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 [[TMP10]]
; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[TMP12]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
; NO-VP-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[TMP8]]
-; NO-VP-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
-; NO-VP-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP3]]
+; NO-VP-NEXT: [[TMP17:%.*]] = sub i64 [[TMP3]], 1
; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]]
; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP16]]
; NO-VP-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP19]], i64 [[TMP18]]
; NO-VP-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[REVERSE]])
; NO-VP-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP20]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
@@ -154,12 +146,6 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -236,8 +222,6 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[STARTVAL1:%.*]], [[N_VEC]]
; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
@@ -250,8 +234,8 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4
; NO-VP-NEXT: [[TMP10:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 100)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[PTR1:%.*]], i64 [[TMP8]]
-; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 0, [[TMP5]]
-; NO-VP-NEXT: [[TMP13:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 0, [[TMP3]]
+; NO-VP-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
; NO-VP-NEXT: [[TMP14:%.*]] = mul i64 -1, [[TMP13]]
; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP11]], i64 [[TMP12]]
; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP15]], i64 [[TMP14]]
@@ -259,15 +243,15 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; NO-VP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP16]], i32 4, <vscale x 4 x i1> [[REVERSE]], <vscale x 4 x i32> poison)
; NO-VP-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]])
; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[PTR2:%.*]], i64 [[TMP8]]
-; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 0, [[TMP5]]
-; NO-VP-NEXT: [[TMP19:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 0, [[TMP3]]
+; NO-VP-NEXT: [[TMP19:%.*]] = sub i64 [[TMP3]], 1
; NO-VP-NEXT: [[TMP20:%.*]] = mul i64 -1, [[TMP19]]
; NO-VP-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP17]], i64 [[TMP18]]
; NO-VP-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP21]], i64 [[TMP20]]
; NO-VP-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[TMP10]])
; NO-VP-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[REVERSE2]])
; NO-VP-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[REVERSE4]], ptr [[TMP22]], i32 4, <vscale x 4 x i1> [[REVERSE3]])
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP: middle.block:
@@ -334,12 +318,6 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
index 2ec23b91..e32af06 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
@@ -15,12 +15,6 @@ define void @test(ptr %p) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 200, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -67,8 +61,6 @@ define void @test(ptr %p) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: br label [[LOOP:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
@@ -77,7 +69,7 @@ define void @test(ptr %p) {
; NO-VP-NEXT: [[TMP7:%.*]] = add i64 [[IV]], 200
; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP8]], align 8
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; NO-VP-NEXT: [[TMP9:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
@@ -346,12 +338,6 @@ define void @trivial_due_max_vscale(ptr %p) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 200, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -398,8 +384,6 @@ define void @trivial_due_max_vscale(ptr %p) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: br label [[LOOP:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
@@ -408,7 +392,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; NO-VP-NEXT: [[TMP7:%.*]] = add i64 [[IV]], 8192
; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP8]], align 32
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; NO-VP-NEXT: [[TMP9:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: middle.block:
@@ -454,11 +438,6 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 3002, [[TMP1]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
index ab05166..aad4dcd 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
@@ -15,12 +15,6 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) {
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 3, [[SPEC_SELECT]]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
-; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP7]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
index 034b767..22c9b2e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
@@ -8,15 +8,8 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-LABEL: define void @test_pr98413_zext_removed(
; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i64 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 97, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 97, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 97, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X]], i64 0
@@ -24,24 +17,27 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: [[TMP6:%.*]] = trunc <vscale x 8 x i64> [[BROADCAST_SPLAT]] to <vscale x 8 x i8>
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 97, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[TMP7]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 8 [[TMP8]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP10:%.*]] = trunc <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i8>
; CHECK-NEXT: [[TMP11:%.*]] = and <vscale x 8 x i8> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP7]]
-; CHECK-NEXT: store <vscale x 8 x i8> [[TMP11]], ptr [[TMP12]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP11]], ptr align 1 [[TMP12]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[TMP7]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 97
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 97, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8
; CHECK-NEXT: [[EXT_L:%.*]] = zext i16 [[L]] to i64
@@ -51,7 +47,7 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -79,15 +75,8 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-LABEL: define void @test_pr98413_sext_removed(
; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 97, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 97, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 97, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X]], i64 0
@@ -95,24 +84,27 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: [[TMP6:%.*]] = trunc <vscale x 8 x i64> [[BROADCAST_SPLAT]] to <vscale x 8 x i8>
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 97, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[TMP7]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 8 [[TMP8]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP10:%.*]] = trunc <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i8>
; CHECK-NEXT: [[TMP11:%.*]] = and <vscale x 8 x i8> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP7]]
-; CHECK-NEXT: store <vscale x 8 x i8> [[TMP11]], ptr [[TMP12]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP11]], ptr align 1 [[TMP12]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[TMP7]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 97
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 97, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8
; CHECK-NEXT: [[EXT_L:%.*]] = sext i16 [[L]] to i64
@@ -122,7 +114,7 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -153,12 +145,6 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 4
-; CHECK-NEXT: [[TMP12:%.*]] = sub i32 [[TMP3]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 9, [[TMP12]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP10]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[DST]], i64 0
@@ -172,7 +158,7 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP6]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 9
-; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
@@ -259,15 +245,8 @@ define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[T:%.*]] = trunc i64 [[N]] to i32
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[V]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[N]], i64 0
@@ -284,20 +263,23 @@ define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64
; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT4]], i32 8, <vscale x 2 x i1> [[TMP8]], <vscale x 2 x double> poison)
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[WIDE_MASKED_GATHER]], <vscale x 2 x ptr> [[BROADCAST_SPLAT6]], i32 8, <vscale x 2 x i1> [[TMP8]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT4]], <vscale x 2 x i1> [[TMP8]], i32 [[TMP14]])
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[WIDE_MASKED_GATHER]], <vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT6]], <vscale x 2 x i1> [[TMP8]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[T1:%.*]] = trunc i64 [[N]] to i32
; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[T1]], [[T]]
; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
@@ -344,15 +326,15 @@ attributes #0 = { "target-features"="+64bit,+v,+zvl256b" }
attributes #1 = { "target-features"="+64bit,+v" }
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} -; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} -; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} -; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]} -; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META7:![0-9]+]], [[META2]]} -; CHECK: [[META7]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]} -; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]} -; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META2]], [[META1]]} +; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} +; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]} +; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META3]], [[META1]]} +; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]], [[META3]]} +; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META3]], [[META1]]} +; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]], [[META3]]} +; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META3]], [[META1]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll index 01edeed..661fd28 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll @@ -11,12 +11,6 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 9, [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll index 6476373..c1fedd9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll @@ -20,12 +20,6 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count) ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8 -; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP8]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 
[[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x ptr> poison, ptr [[DSTV]], i64 0 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll index d97e93d..ae894d1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll @@ -10,42 +10,38 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6 ; SCALABLE-LABEL: define void @uniform_load( ; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { ; SCALABLE-NEXT: [[ENTRY:.*]]: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; SCALABLE: [[VECTOR_PH]]: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; SCALABLE: [[VECTOR_BODY]]: -; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; SCALABLE-NEXT: [[TMP7:%.*]] = load i64, ptr [[B]], align 8 ; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP7]], i64 0 ; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]) +; SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025 +; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], 
label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[SCALAR_PH]]: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 ; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -88,12 +84,6 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6 ; TF-SCALABLE-NEXT: [[ENTRY:.*]]: ; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; TF-SCALABLE: [[VECTOR_PH]]: -; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]] -; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] @@ -156,8 +146,6 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; SCALABLE: [[VECTOR_BODY]]: ; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -166,9 +154,9 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] ; SCALABLE-NEXT: br i1 [[CMP_N]], label 
%[[FOR_END:.*]], label %[[SCALAR_PH]] @@ -182,7 +170,7 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], %[[FOR_BODY]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] ; SCALABLE-NEXT: ret i64 [[V_LCSSA]] @@ -234,8 +222,6 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] -; TF-SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; TF-SCALABLE: [[VECTOR_BODY]]: ; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -244,7 +230,7 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; TF-SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP5]] +; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP3]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: @@ -286,15 +272,8 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; SCALABLE-LABEL: define void @conditional_uniform_load( ; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; SCALABLE-NEXT: [[ENTRY:.*]]: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; SCALABLE: [[VECTOR_PH]]: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[B]], i64 0 @@ -302,30 +281,39 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; SCALABLE-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 1) ; 
SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]] -; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP5]] -; SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0 -; SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; SCALABLE: [[VECTOR_BODY]]: -; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] ; SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP17:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP17]], i64 0 +; SCALABLE-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer +; SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP17]] to i64 +; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP8]] +; SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0 +; SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer +; SCALABLE-NEXT: [[TMP18:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() +; SCALABLE-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 4 x i32> [[TMP18]], [[BROADCAST_SPLAT4]] ; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ugt <vscale x 4 x i64> [[VEC_IND]], splat (i64 10) -; SCALABLE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 8, <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i64> poison) -; SCALABLE-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], <vscale x 4 x i64> zeroinitializer +; SCALABLE-NEXT: [[TMP13:%.*]] = select <vscale x 4 x i1> [[TMP11]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> zeroinitializer +; SCALABLE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.vp.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> align 8 [[BROADCAST_SPLAT]], <vscale x 4 x i1> [[TMP10]], i32 [[TMP17]]) +; SCALABLE-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], <vscale x 4 x i64> zeroinitializer ; SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; SCALABLE-NEXT: store <vscale x 4 x i64> [[PREDPHI]], ptr [[TMP12]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> [[PREDPHI]], ptr align 8 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP17]]) +; SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP17]] to i64 +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] -; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: 
br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; SCALABLE-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025 +; SCALABLE-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[SCALAR_PH]]: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] +; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] ; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 ; SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]] ; SCALABLE: [[DO_LOAD]]: @@ -337,7 +325,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 ; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -394,12 +382,6 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: [[ENTRY:.*]]: ; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; TF-SCALABLE: [[VECTOR_PH]]: -; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]] -; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[B]], i64 0 @@ -482,42 +464,38 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; SCALABLE-LABEL: define void @uniform_load_unaligned( ; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; SCALABLE-NEXT: [[ENTRY:.*]]: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; SCALABLE: [[VECTOR_PH]]: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 
@llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; SCALABLE: [[VECTOR_BODY]]: -; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; SCALABLE-NEXT: [[TMP6:%.*]] = load i64, ptr [[B]], align 1 ; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP6]], i64 0 ; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]) +; SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025 +; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[SCALAR_PH]]: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1 ; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -560,12 +538,6 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; TF-SCALABLE-NEXT: [[ENTRY:.*]]: ; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; TF-SCALABLE: [[VECTOR_PH]]: -; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 
[[TMP0]], 2 -; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]] -; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] @@ -619,42 +591,38 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; SCALABLE-LABEL: define void @uniform_store( ; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; SCALABLE-NEXT: [[ENTRY:.*]]: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; SCALABLE: [[VECTOR_PH]]: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0 ; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; SCALABLE: [[VECTOR_BODY]]: -; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 ; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]]) +; SCALABLE-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] +; SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025 +; SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[SCALAR_PH]]: 
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 ; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -697,12 +665,6 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; TF-SCALABLE-NEXT: [[ENTRY:.*]]: ; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; TF-SCALABLE: [[VECTOR_PH]]: -; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]] -; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0 @@ -756,54 +718,49 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; SCALABLE-LABEL: define void @uniform_store_of_loop_varying( ; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; SCALABLE-NEXT: [[ENTRY:.*]]: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; SCALABLE: [[VECTOR_PH]]: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 +; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[B]], i64 0 +; SCALABLE-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer ; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0 ; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer +; 
SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() +; SCALABLE-NEXT: [[TMP13:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1) +; SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP13]] ; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; SCALABLE: [[VECTOR_BODY]]: -; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() +; SCALABLE-NEXT: [[TMP10:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +; SCALABLE-NEXT: [[INDEX:%.*]] = mul i64 1, [[TMP8]] ; SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX]], i64 0 ; SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer -; SCALABLE-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP6]] -; SCALABLE-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 1) -; SCALABLE-NEXT: [[TMP9:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP8]] -; SCALABLE-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0 -; SCALABLE-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 1 -; SCALABLE-NEXT: [[TMP12:%.*]] = call i32 @llvm.vscale.i32() -; SCALABLE-NEXT: [[TMP13:%.*]] = mul nuw i32 [[TMP12]], 2 -; SCALABLE-NEXT: [[TMP14:%.*]] = sub i32 [[TMP13]], 1 -; SCALABLE-NEXT: [[TMP15:%.*]] = extractelement <vscale x 2 x i64> [[TMP9]], i32 [[TMP14]] -; SCALABLE-NEXT: store i64 [[TMP15]], ptr [[B]], align 8 +; SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[VEC_IND]], <vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT1]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) ; SCALABLE-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP10]] -; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP16]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; SCALABLE-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) +; SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64 +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[TMP10]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]] +; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025 +; SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[SCALAR_PH]]: -; SCALABLE-NEXT: 
[[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8 ; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -850,12 +807,6 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: [[ENTRY:.*]]: ; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; TF-SCALABLE: [[VECTOR_PH]]: -; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]] -; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[B]], i64 0 @@ -920,15 +871,8 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; SCALABLE-LABEL: define void @conditional_uniform_store( ; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; SCALABLE-NEXT: [[ENTRY:.*]]: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; SCALABLE: [[VECTOR_PH]]: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0 @@ -938,29 +882,33 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; SCALABLE-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1) ; SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP8]] -; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP5]] -; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement 
<vscale x 2 x i64> poison, i64 [[TMP9]], i64 0 -; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; SCALABLE: [[VECTOR_BODY]]: -; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] ; SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; SCALABLE-NEXT: [[TMP14:%.*]] = zext i32 [[TMP7]] to i64 +; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP14]] +; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP9]], i64 0 +; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ugt <vscale x 2 x i64> [[VEC_IND]], splat (i64 10) -; SCALABLE-NEXT: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], <vscale x 2 x ptr> [[BROADCAST_SPLAT2]], i32 8, <vscale x 2 x i1> [[TMP10]]) +; SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], <vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT2]], <vscale x 2 x i1> [[TMP10]], i32 [[TMP7]]) ; SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT1]], ptr [[TMP12]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) +; SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64 +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025 +; SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[SCALAR_PH]]: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] +; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] ; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 ; SCALABLE-NEXT: br i1 
[[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]] ; SCALABLE: [[DO_STORE]]: @@ -971,7 +919,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -1027,12 +975,6 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: [[ENTRY:.*]]: ; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; TF-SCALABLE: [[VECTOR_PH]]: -; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]] -; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0 @@ -1109,42 +1051,38 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; SCALABLE-LABEL: define void @uniform_store_unaligned( ; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; SCALABLE-NEXT: [[ENTRY:.*]]: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; SCALABLE: [[VECTOR_PH]]: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0 ; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; SCALABLE: [[VECTOR_BODY]]: -; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1 ; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; SCALABLE-NEXT: store <vscale x 2 x i64> 
[[BROADCAST_SPLAT]], ptr [[TMP7]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]]) +; SCALABLE-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] +; SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025 +; SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[SCALAR_PH]]: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1 ; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -1187,12 +1125,6 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[ENTRY:.*]]: ; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; TF-SCALABLE: [[VECTOR_PH]]: -; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]] -; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll index d93a5c0..2400198 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll @@ -8,12 +8,6 @@ define void @foo(ptr %arg) #0 { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label 
%[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 3, [[TMP2]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll index d3c3c6b..8d287fe 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll @@ -12,18 +12,12 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; IF-EVL-NEXT: entry: ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP5]], 1 -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP8]] -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true) ; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]]) @@ -68,8 +62,6 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -80,7 +72,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; NO-VP-NEXT: [[TMP11:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]] ; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] ; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP: middle.block: diff --git 
a/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll index bda9839..1f3bd45 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll @@ -17,17 +17,22 @@ define void @vf_will_not_generate_any_vector_insts(ptr %src, ptr %dst) { ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <2 x ptr> poison, ptr [[DST]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <2 x ptr> [[BROADCAST_SPLATINSERT2]], <2 x ptr> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP10]], 4 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[DST]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4, !alias.scope [[META0:![0-9]+]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <2 x i32> poison, i32 [[TMP0]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT4]], <2 x i32> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[BROADCAST_SPLAT5]], <2 x ptr> [[BROADCAST_SPLAT3]], i32 4, <2 x i1> splat (i1 true)), !alias.scope [[META3:![0-9]+]], !noalias [[META0]] -; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[BROADCAST_SPLAT5]], <2 x ptr> [[BROADCAST_SPLAT3]], i32 4, <2 x i1> splat (i1 true)), !alias.scope [[META3]], !noalias [[META0]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[SRC]], align 4, !alias.scope [[META0:![0-9]+]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP6]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT2]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer +; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT3]], <vscale x 4 x ptr> align 4 [[BROADCAST_SPLAT]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]] +; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ 
-41,7 +46,7 @@ define void @vf_will_not_generate_any_vector_insts(ptr %src, ptr %dst) { ; CHECK-NEXT: store i32 [[DOTPRE]], ptr [[DST]], align 4 ; CHECK-NEXT: [[TMP3]] = add nuw i64 [[TMP2]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[TMP3]], 100 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -65,8 +70,9 @@ exit: ; CHECK: [[META2]] = distinct !{[[META2]], !"LVerDomain"} ; CHECK: [[META3]] = !{[[META4:![0-9]+]]} ; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]} -; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]], [[META8:![0-9]+]]} ; CHECK: [[META6]] = !{!"llvm.loop.isvectorized", i32 1} -; CHECK: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]]} +; CHECK: [[META7]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} +; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META6]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll index d7c9ce4..4669522 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll @@ -10,8 +10,7 @@ ; RUN: -disable-output < %s 2>&1 | FileCheck %s define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocapture noundef readonly %B, i32 noundef signext %n) { -; CHECK: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF>=1' { -; CHECK-NEXT: Live-in vp<[[VF:%.+]]> = VF +; CHECK: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { ; CHECK-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF ; CHECK-NEXT: Live-in vp<[[VTC:%.+]]> = vector-trip-count ; CHECK-NEXT: vp<[[OTC:%.+]]> = original trip-count @@ -21,41 +20,42 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: -; CHECK-NEXT: vp<[[RESUME_IV_A:%.+]]> = DERIVED-IV ir<%n> + vp<[[VTC]]> * ir<-1> -; CHECK-NEXT: vp<[[RESUME_IV_B:%.+]]> = DERIVED-IV ir<%n> + vp<[[VTC]]> * ir<-1> ; CHECK-NEXT: Successor(s): vector loop ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: ; CHECK-NEXT: EMIT vp<[[INDUCTION:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[INDEX_NEXT:%.+]]> -; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[INDUCTION]]> * ir<-1> -; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>, vp<[[VF]]> +; CHECK-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%.+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> +; CHECK-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ vp<[[OTC]]>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] +; CHECK-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> +; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[EVL_PHI]]> * ir<-1> +; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>, vp<[[EVL]]> ; CHECK-NEXT: CLONE ir<[[IDX:%.+]]> = add nsw vp<[[SCALAR_STEPS]]>, ir<-1> ; CHECK-NEXT: CLONE ir<[[IDX_PROM:%.+]]> = zext ir<[[IDX]]> ; CHECK-NEXT: CLONE ir<[[ARRAY_IDX_B:%.+]]> = getelementptr inbounds ir<[[B:%.+]]>, 
ir<[[IDX_PROM]]> -; CHECK-NEXT: vp<[[VEC_END_PTR_B:%.+]]> = vector-end-pointer inbounds ir<[[ARRAY_IDX_B]]>, vp<[[VF]]> -; CHECK-NEXT: WIDEN ir<[[VAL_B:%.+]]> = load vp<[[VEC_END_PTR_B]]> +; CHECK-NEXT: vp<[[VEC_END_PTR_B:%.+]]> = vector-end-pointer ir<[[ARRAY_IDX_B]]>, vp<[[EVL]]> +; CHECK-NEXT: WIDEN ir<[[VAL_B:%.+]]> = vp.load vp<[[VEC_END_PTR_B]]>, vp<[[EVL]]> ; CHECK-NEXT: WIDEN ir<[[ADD_RESULT:%.+]]> = add ir<[[VAL_B]]>, ir<1> ; CHECK-NEXT: CLONE ir<[[ARRAY_IDX_A:%.+]]> = getelementptr inbounds ir<[[A:%.+]]>, ir<[[IDX_PROM]]> -; CHECK-NEXT: vp<[[VEC_END_PTR_A:%.+]]> = vector-end-pointer inbounds ir<[[ARRAY_IDX_A]]>, vp<[[VF]]> -; CHECK-NEXT: WIDEN store vp<[[VEC_END_PTR_A]]>, ir<[[ADD_RESULT]]> -; CHECK-NEXT: EMIT vp<[[INDEX_NEXT]]> = add nuw vp<[[INDUCTION]]>, vp<[[VFxUF]]> +; CHECK-NEXT: vp<[[VEC_END_PTR_A:%.+]]> = vector-end-pointer ir<[[ARRAY_IDX_A]]>, vp<[[EVL]]> +; CHECK-NEXT: WIDEN vp.store vp<[[VEC_END_PTR_A]]>, ir<[[ADD_RESULT]]>, vp<[[EVL]]> +; CHECK-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[EVL]]>, vp<[[EVL_PHI]]> +; CHECK-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[EVL]]> +; CHECK-NEXT: EMIT vp<[[INDEX_NEXT]]> = add vp<[[INDUCTION]]>, vp<[[VFxUF]]> ; CHECK-NEXT: EMIT branch-on-count vp<[[INDEX_NEXT]]>, vp<[[VTC]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq vp<[[OTC]]>, vp<[[VTC]]> -; CHECK-NEXT: EMIT branch-on-cond vp<[[CMP]]> -; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup>, scalar.ph +; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<for.cond.cleanup>: ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[RESUME_IV_A]]>, middle.block ], [ ir<%n>, ir-bb<entry> ] -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ vp<[[RESUME_IV_B]]>, middle.block ], [ ir<%n>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<%n>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ ir<%n>, ir-bb<entry> ] ; CHECK-NEXT: Successor(s): ir-bb<for.body> ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll index 13c443c..b4eebcc 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll @@ -12,7 +12,7 @@ define noundef i32 @fun(i32 %argc, ptr nocapture readnone %argv) { entry: %l_4774.i = alloca [4 x [2 x i128]], align 8 - call void @llvm.lifetime.start.p0(i64 128, ptr nonnull %l_4774.i) + call void @llvm.lifetime.start.p0(ptr nonnull %l_4774.i) br label %for.cond4.preheader.i for.cond4.preheader.i: ; preds = %for.cond4.preheader.i, %entry @@ -31,13 +31,13 @@ func_1.exit: ; preds = %for.cond4.preheader %cmp200.i = icmp ne i128 %0, 0 %conv202.i = zext i1 %cmp200.i to i64 %call203.i = tail call i64 @safe_sub_func_int64_t_s_s(i64 noundef %conv202.i, i64 noundef 9139899272418802852) - call void @llvm.lifetime.end.p0(i64 128, ptr nonnull %l_4774.i) + call void @llvm.lifetime.end.p0(ptr nonnull %l_4774.i) br label %for.cond for.cond: ; preds = %for.cond, %func_1.exit br label %for.cond } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr 
nocapture) declare dso_local i64 @safe_sub_func_int64_t_s_s(i64, i64) diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll index 639fb86..6fc7ed2 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll @@ -13,9 +13,9 @@ define i32 @main(ptr %ptr) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[S:%.*]] = alloca i16, align 2 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I]]) ; CHECK-NEXT: store i32 0, ptr [[I]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr nonnull [[S]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S]]) ; CHECK-NEXT: [[CALL:%.*]] = call i32 (ptr, ...) @goo(ptr nonnull [[I]]) ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4 ; CHECK-NEXT: [[STOREMERGE6:%.*]] = trunc i32 [[TMP0]] to i16 @@ -111,16 +111,16 @@ define i32 @main(ptr %ptr) { ; CHECK-NEXT: br label [[FOR_END12]] ; CHECK: for.end12: ; CHECK-NEXT: [[CALL13:%.*]] = call i32 (ptr, ...) @foo(ptr nonnull [[S]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[S]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[I]]) ; CHECK-NEXT: ret i32 0 ; entry: %i = alloca i32, align 4 %s = alloca i16, align 2 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i) #3 + call void @llvm.lifetime.start.p0(ptr nonnull %i) #3 store i32 0, ptr %i, align 4 - call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %s) #3 + call void @llvm.lifetime.start.p0(ptr nonnull %s) #3 %call = call i32 (ptr, ...) @goo(ptr nonnull %i) #3 %0 = load i32, ptr %i, align 4 %storemerge6 = trunc i32 %0 to i16 @@ -174,17 +174,17 @@ for.cond.for.end12_crit_edge: ; preds = %for.inc9 for.end12: ; preds = %for.cond.for.end12_crit_edge, %entry %call13 = call i32 (ptr, ...) @foo(ptr nonnull %s) #3 - call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %s) #3 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i) #3 + call void @llvm.lifetime.end.p0(ptr nonnull %s) #3 + call void @llvm.lifetime.end.p0(ptr nonnull %i) #3 ret i32 0 } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare i32 @goo(...) local_unnamed_addr #2 declare i32 @foo(...) 
local_unnamed_addr #2 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 diff --git a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll index 4145967..864d221 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll @@ -23,9 +23,6 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr ; CHECK: [[LOOP_3_PREHEADER]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[IV_1_LCSSA2]], 15 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16 -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[IV_1_LCSSA2]], 1 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer diff --git a/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll b/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll index baa967c..53de252 100644 --- a/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll +++ b/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll @@ -13,7 +13,7 @@ define void @foo(ptr %h) !dbg !4 { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]], !dbg [[DBG21]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_COND_CLEANUP32:%.*]] ], !dbg [[DBG222:![0-9]+]] -; CHECK-NEXT: br label [[FOR_COND5_PREHEADER1:%.*]], !dbg [[DBG21]] +; CHECK-NEXT: br label [[FOR_COND5_PREHEADER1:%.*]] ; CHECK: for.cond5.preheader1: ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, [[VECTOR_BODY]] ], [ [[TMP4:%.*]], [[FOR_COND5_PREHEADER1]] ], !dbg [[DBG22:![0-9]+]] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[H]], <4 x i64> [[VEC_PHI]] diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll index 2bafa6c..b266ddf 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll @@ -15,7 +15,6 @@ define i64 @pr97452_scalable_vf1_for_live_out(ptr %src) { ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 23, [[TMP1]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 23, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1 ; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 0, i32 [[TMP4]] @@ -26,7 +25,7 @@ define i64 @pr97452_scalable_vf1_for_live_out(ptr %src) { ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]] ; CHECK-NEXT: [[WIDE_LOAD]] = load <vscale x 1 x i64>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> [[VECTOR_RECUR]], <vscale x 1 x i64> [[WIDE_LOAD]], i32 -1) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 
[[INDEX]], [[TMP1]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -83,7 +82,6 @@ define void @pr97452_scalable_vf1_for_no_live_out(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 23, [[TMP1]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 23, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1 ; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 0, i32 [[TMP4]] @@ -96,7 +94,7 @@ define void @pr97452_scalable_vf1_for_no_live_out(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> [[VECTOR_RECUR]], <vscale x 1 x i64> [[WIDE_LOAD]], i32 -1) ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]] ; CHECK-NEXT: store <vscale x 1 x i64> [[TMP7]], ptr [[TMP8]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/histograms.ll b/llvm/test/Transforms/LoopVectorize/histograms.ll index 1adc0bf..f0ceae7 100644 --- a/llvm/test/Transforms/LoopVectorize/histograms.ll +++ b/llvm/test/Transforms/LoopVectorize/histograms.ll @@ -1,25 +1,48 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3 ; RUN: opt < %s -passes=loop-vectorize,instcombine -enable-histogram-loop-vectorization -force-vector-width=2 -S | FileCheck %s -;; Currently we don't expect this to vectorize, since the generic cost model returns -;; invalid for the histogram intrinsic. 
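; ---------------------------------------------------------------------------
; Annotation, not part of the original test: the comment removed above noted
; that the generic cost model used to return an invalid cost for the histogram
; intrinsic, so this loop stayed scalar. The updated checks below now expect
; the vectorizer to emit that intrinsic, which folds the scalar
; load/add-1/store bucket update into one call per vector iteration and,
; unlike a plain masked scatter, combines lanes that address the same bucket
; so no increments are lost. A minimal hand-written sketch follows; the
; function name is hypothetical, and the intrinsic signature is copied from
; the checks below:

declare void @llvm.experimental.vector.histogram.add.v2p0.i32(<2 x ptr>, i32, <2 x i1>)

define void @histogram_sketch(<2 x ptr> %buckets) {
entry:
  ; Add 1 to every bucket addressed by %buckets, with all lanes active.
  call void @llvm.experimental.vector.histogram.add.v2p0.i32(<2 x ptr> %buckets, i32 1, <2 x i1> splat (i1 true))
  ret void
}
; ---------------------------------------------------------------------------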
define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %N) { ; CHECK-LABEL: define void @simple_histogram( ; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], i64 [[N:%.*]]) { ; CHECK-NEXT: entry: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 2 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -2 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[WIDE_LOAD]] to <2 x i64> +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP1]], i64 1 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP3]], i64 0 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x ptr> [[TMP6]], ptr [[TMP5]], i64 1 +; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.v2p0.i32(<2 x ptr> [[TMP7]], i32 1, <2 x i1> splat (i1 true)) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 2 +; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; CHECK-NEXT: br label [[FOR_BODY1:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[INDICES]], i64 [[IV]] +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV1]] ; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP12]] to i64 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[IDXPROM1]] ; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 ; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1 ; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]] +; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: for.exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/lifetime.ll b/llvm/test/Transforms/LoopVectorize/lifetime.ll index 3dd41b5..61e8635 100644 --- a/llvm/test/Transforms/LoopVectorize/lifetime.ll +++ b/llvm/test/Transforms/LoopVectorize/lifetime.ll @@ -12,23 
+12,23 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 define void @test(ptr %d) { entry: %arr = alloca [1024 x i32], align 16 - call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1 + call void @llvm.lifetime.start.p0(ptr %arr) #1 br label %for.body for.body: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] - call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1 + call void @llvm.lifetime.end.p0(ptr %arr) #1 %arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv %0 = load i32, ptr %arrayidx, align 8 store i32 100, ptr %arrayidx, align 8 - call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1 + call void @llvm.lifetime.start.p0(ptr %arr) #1 %indvars.iv.next = add i64 %indvars.iv, 1 %lftr.wideiv = trunc i64 %indvars.iv.next to i32 %exitcond = icmp ne i32 %lftr.wideiv, 128 br i1 %exitcond, label %for.body, label %for.end for.end: - call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1 + call void @llvm.lifetime.end.p0(ptr %arr) #1 ret void } @@ -40,26 +40,26 @@ for.end: define void @testbitcast(ptr %d) { entry: %arr = alloca [1024 x i32], align 16 - call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1 + call void @llvm.lifetime.start.p0(ptr %arr) #1 br label %for.body for.body: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] - call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1 + call void @llvm.lifetime.end.p0(ptr %arr) #1 %arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv %0 = load i32, ptr %arrayidx, align 8 store i32 100, ptr %arrayidx, align 8 - call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1 + call void @llvm.lifetime.start.p0(ptr %arr) #1 %indvars.iv.next = add i64 %indvars.iv, 1 %lftr.wideiv = trunc i64 %indvars.iv.next to i32 %exitcond = icmp ne i32 %lftr.wideiv, 128 br i1 %exitcond, label %for.body, label %for.end for.end: - call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1 + call void @llvm.lifetime.end.p0(ptr %arr) #1 ret void } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 diff --git a/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll b/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll index 4fda9d3..31c3248 100644 --- a/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll +++ b/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll @@ -24,12 +24,10 @@ define void @foo() { ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4 ; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; CHECK-NEXT: [[TMP6:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 1) ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP6]] -; CHECK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP19]] +; CHECK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP3]] ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -52,7 +50,7 @@ define void @foo() { ; CHECK: vector.latch: ; CHECK-NEXT: 
[[VEC_PHI5:%.*]] = phi <vscale x 4 x float> [ [[TMP12]], [[INNER_LOOP1]] ] ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VEC_PHI5]], <vscale x 4 x ptr> [[TMP10]], i32 4, <vscale x 4 x i1> splat (i1 true)) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check.ll b/llvm/test/Transforms/LoopVectorize/runtime-check.ll index c5838fe..8d9ee2a 100644 --- a/llvm/test/Transforms/LoopVectorize/runtime-check.ll +++ b/llvm/test/Transforms/LoopVectorize/runtime-check.ll @@ -26,7 +26,7 @@ define i32 @foo(ptr nocapture %a, ptr nocapture %b, i32 %n) nounwind uwtable ssp ; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16, !dbg [[DBG9]] ; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]], !dbg [[DBG9]] ; CHECK: vector.ph: -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483644, !dbg [[DBG9]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483644 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]], !dbg [[DBG9]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ], !dbg [[DBG9]] diff --git a/llvm/test/Transforms/LoopVectorize/scalable-assume.ll b/llvm/test/Transforms/LoopVectorize/scalable-assume.ll index 8291164..83541f2 100644 --- a/llvm/test/Transforms/LoopVectorize/scalable-assume.ll +++ b/llvm/test/Transforms/LoopVectorize/scalable-assume.ll @@ -14,8 +14,6 @@ define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1600, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1600, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -39,7 +37,7 @@ define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]] ; CHECK-NEXT: store <vscale x 2 x float> [[TMP14]], ptr [[TMP16]], align 4 ; CHECK-NEXT: store <vscale x 2 x float> [[TMP15]], ptr [[TMP19]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -90,8 +88,6 @@ define void @test2(ptr %a, ptr noalias %b) { ; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1600, [[TMP7]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1600, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -113,7 +109,7 @@ define 
void @test2(ptr %a, ptr noalias %b) { ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]] ; CHECK-NEXT: store <vscale x 2 x float> [[TMP14]], ptr [[TMP16]], align 4 ; CHECK-NEXT: store <vscale x 2 x float> [[TMP15]], ptr [[TMP19]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -159,13 +155,11 @@ define void @predicated_assume(ptr noalias nocapture readonly %a, ptr noalias no ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 1) ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP8]] diff --git a/llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll index bfc0a48..07402ab 100644 --- a/llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll @@ -36,8 +36,6 @@ define i32 @recurrence_1(ptr nocapture readonly %a, ptr nocapture %b, i32 %n) { ; CHECK-VF4UF1-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4 ; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP11]] ; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] -; CHECK-VF4UF1-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF4UF1-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4 ; CHECK-VF4UF1-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-VF4UF1-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4 ; CHECK-VF4UF1-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1 @@ -53,7 +51,7 @@ define i32 @recurrence_1(ptr nocapture readonly %a, ptr nocapture %b, i32 %n) { ; CHECK-VF4UF1-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] ; CHECK-VF4UF1-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[TMP20]] ; CHECK-VF4UF1-NEXT: store <vscale x 4 x i32> [[TMP22]], ptr [[TMP21]], align 4 -; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]] +; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]] ; CHECK-VF4UF1-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF4UF1-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-VF4UF1: [[MIDDLE_BLOCK]]: @@ -101,8 +99,6 @@ define i32 @recurrence_1(ptr nocapture readonly %a, ptr nocapture %b, i32 %n) { ; CHECK-VF4UF2-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 8 ; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP11]] ; 
CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] -; CHECK-VF4UF2-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF4UF2-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 8 ; CHECK-VF4UF2-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-VF4UF2-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4 ; CHECK-VF4UF2-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1 @@ -128,7 +124,7 @@ define i32 @recurrence_1(ptr nocapture readonly %a, ptr nocapture %b, i32 %n) { ; CHECK-VF4UF2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[TMP25]], i64 [[TMP30]] ; CHECK-VF4UF2-NEXT: store <vscale x 4 x i32> [[TMP26]], ptr [[TMP25]], align 4 ; CHECK-VF4UF2-NEXT: store <vscale x 4 x i32> [[TMP27]], ptr [[TMP31]], align 4 -; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]] +; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]] ; CHECK-VF4UF2-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF4UF2-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-VF4UF2: [[MIDDLE_BLOCK]]: @@ -198,8 +194,6 @@ define i32 @recurrence_2(ptr nocapture readonly %a, i32 %n) { ; CHECK-VF4UF1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; CHECK-VF4UF1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF4UF1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-VF4UF1-NEXT: [[TMP7:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-VF4UF1-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 4 ; CHECK-VF4UF1-NEXT: [[TMP9:%.*]] = sub i32 [[TMP8]], 1 @@ -217,7 +211,7 @@ define i32 @recurrence_2(ptr nocapture readonly %a, i32 %n) { ; CHECK-VF4UF1-NEXT: [[TMP15:%.*]] = select <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i32> [[TMP13]], <vscale x 4 x i32> zeroinitializer ; CHECK-VF4UF1-NEXT: [[TMP16:%.*]] = icmp slt <vscale x 4 x i32> [[VEC_PHI]], [[TMP15]] ; CHECK-VF4UF1-NEXT: [[TMP17]] = select <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP15]] -; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-VF4UF1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF4UF1-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-VF4UF1: [[MIDDLE_BLOCK]]: @@ -259,8 +253,6 @@ define i32 @recurrence_2(ptr nocapture readonly %a, i32 %n) { ; CHECK-VF4UF2-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8 ; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; CHECK-VF4UF2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF4UF2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8 ; CHECK-VF4UF2-NEXT: [[TMP7:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-VF4UF2-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 4 ; CHECK-VF4UF2-NEXT: [[TMP9:%.*]] = sub i32 [[TMP8]], 1 @@ -289,7 +281,7 @@ define i32 @recurrence_2(ptr nocapture readonly %a, i32 %n) { ; CHECK-VF4UF2-NEXT: [[TMP24:%.*]] = icmp slt <vscale x 4 x i32> [[VEC_PHI1]], [[TMP22]] ; CHECK-VF4UF2-NEXT: [[TMP25]] = select <vscale x 4 x i1> [[TMP23]], <vscale x 4 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP21]] ; CHECK-VF4UF2-NEXT: [[TMP26]] = select <vscale x 4 x i1> [[TMP24]], <vscale x 4 x i32> [[VEC_PHI1]], <vscale x 4 x i32> [[TMP22]] -; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = 
add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-VF4UF2-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF4UF2-NEXT: br i1 [[TMP27]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-VF4UF2: [[MIDDLE_BLOCK]]: @@ -388,8 +380,6 @@ define void @recurrence_3(ptr nocapture readonly %a, ptr nocapture %b, i32 %n, f ; CHECK-VF4UF1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 ; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP12]] ; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] -; CHECK-VF4UF1-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF4UF1-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 4 ; CHECK-VF4UF1-NEXT: [[TMP15:%.*]] = add i64 1, [[N_VEC]] ; CHECK-VF4UF1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x double> poison, double [[CONV1]], i64 0 ; CHECK-VF4UF1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x double> [[BROADCAST_SPLATINSERT]], <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer @@ -411,7 +401,7 @@ define void @recurrence_3(ptr nocapture readonly %a, ptr nocapture %b, i32 %n, f ; CHECK-VF4UF1-NEXT: [[TMP25:%.*]] = fsub fast <vscale x 4 x double> [[TMP22]], [[TMP24]] ; CHECK-VF4UF1-NEXT: [[TMP26:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[OFFSET_IDX]] ; CHECK-VF4UF1-NEXT: store <vscale x 4 x double> [[TMP25]], ptr [[TMP26]], align 8, !alias.scope [[META9:![0-9]+]], !noalias [[META6]] -; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]] +; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] ; CHECK-VF4UF1-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF4UF1-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-VF4UF1: [[MIDDLE_BLOCK]]: @@ -466,8 +456,6 @@ define void @recurrence_3(ptr nocapture readonly %a, ptr nocapture %b, i32 %n, f ; CHECK-VF4UF2-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8 ; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP12]] ; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] -; CHECK-VF4UF2-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF4UF2-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 8 ; CHECK-VF4UF2-NEXT: [[TMP15:%.*]] = add i64 1, [[N_VEC]] ; CHECK-VF4UF2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x double> poison, double [[CONV1]], i64 0 ; CHECK-VF4UF2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x double> [[BROADCAST_SPLATINSERT]], <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer @@ -502,7 +490,7 @@ define void @recurrence_3(ptr nocapture readonly %a, ptr nocapture %b, i32 %n, f ; CHECK-VF4UF2-NEXT: [[TMP38:%.*]] = getelementptr inbounds double, ptr [[TMP34]], i64 [[TMP37]] ; CHECK-VF4UF2-NEXT: store <vscale x 4 x double> [[TMP32]], ptr [[TMP34]], align 8, !alias.scope [[META9:![0-9]+]], !noalias [[META6]] ; CHECK-VF4UF2-NEXT: store <vscale x 4 x double> [[TMP33]], ptr [[TMP38]], align 8, !alias.scope [[META9]], !noalias [[META6]] -; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]] +; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] ; CHECK-VF4UF2-NEXT: [[TMP39:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF4UF2-NEXT: br i1 [[TMP39]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-VF4UF2: [[MIDDLE_BLOCK]]: @@ -568,12 +556,10 @@ 
define i64 @constant_folded_previous_value() { ; CHECK-VF4UF1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1000, [[TMP3]] ; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i64 1000, [[N_MOD_VF]] -; CHECK-VF4UF1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF4UF1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-VF4UF1-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK-VF4UF1: [[VECTOR_BODY]]: ; CHECK-VF4UF1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF4UF1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF4UF1-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-VF4UF1: [[MIDDLE_BLOCK]]: @@ -596,12 +582,10 @@ define i64 @constant_folded_previous_value() { ; CHECK-VF4UF2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 ; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1000, [[TMP3]] ; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i64 1000, [[N_MOD_VF]] -; CHECK-VF4UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF4UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 ; CHECK-VF4UF2-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK-VF4UF2: [[VECTOR_BODY]]: ; CHECK-VF4UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF4UF2-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF4UF2-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-VF4UF2: [[MIDDLE_BLOCK]]: @@ -646,14 +630,12 @@ define i32 @extract_second_last_iteration(ptr %cval, i32 %x) { ; CHECK-VF4UF1-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 4 ; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i32 96, [[TMP3]] ; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i32 96, [[N_MOD_VF]] -; CHECK-VF4UF1-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-VF4UF1-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4 ; CHECK-VF4UF1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0 ; CHECK-VF4UF1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-VF4UF1-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() ; CHECK-VF4UF1-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i32> [[TMP6]], splat (i32 1) ; CHECK-VF4UF1-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP7]] -; CHECK-VF4UF1-NEXT: [[TMP8:%.*]] = mul i32 1, [[TMP5]] +; CHECK-VF4UF1-NEXT: [[TMP8:%.*]] = mul i32 1, [[TMP3]] ; CHECK-VF4UF1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP8]], i64 0 ; CHECK-VF4UF1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-VF4UF1-NEXT: br label %[[VECTOR_BODY:.*]] @@ -661,7 +643,7 @@ define i32 @extract_second_last_iteration(ptr %cval, i32 %x) { ; CHECK-VF4UF1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-VF4UF1-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], 
%[[VECTOR_BODY]] ] ; CHECK-VF4UF1-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] -; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]] +; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] ; CHECK-VF4UF1-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-VF4UF1-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF4UF1-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] @@ -690,13 +672,11 @@ define i32 @extract_second_last_iteration(ptr %cval, i32 %x) { ; CHECK-VF4UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 96, [[TMP1]] ; CHECK-VF4UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK-VF4UF2: [[VECTOR_PH]]: -; CHECK-VF4UF2-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-VF4UF2-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 8 -; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i32 96, [[TMP3]] -; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i32 96, [[N_MOD_VF]] ; CHECK-VF4UF2-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-VF4UF2-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4 ; CHECK-VF4UF2-NEXT: [[TMP6:%.*]] = mul i32 [[TMP5]], 2 +; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i32 96, [[TMP6]] +; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i32 96, [[N_MOD_VF]] ; CHECK-VF4UF2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0 ; CHECK-VF4UF2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; CHECK-VF4UF2-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() @@ -778,8 +758,6 @@ define void @sink_after(ptr %a, ptr %b, i64 %n) { ; CHECK-VF4UF1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4 ; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]] ; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-VF4UF1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF4UF1-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 ; CHECK-VF4UF1-NEXT: [[TMP9:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-VF4UF1-NEXT: [[TMP10:%.*]] = mul nuw i32 [[TMP9]], 4 ; CHECK-VF4UF1-NEXT: [[TMP11:%.*]] = sub i32 [[TMP10]], 1 @@ -797,7 +775,7 @@ define void @sink_after(ptr %a, ptr %b, i64 %n) { ; CHECK-VF4UF1-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP17]], [[TMP16]] ; CHECK-VF4UF1-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] ; CHECK-VF4UF1-NEXT: store <vscale x 4 x i32> [[TMP18]], ptr [[TMP19]], align 4, !alias.scope [[META20:![0-9]+]], !noalias [[META17]] -; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-VF4UF1-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF4UF1-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-VF4UF1: [[MIDDLE_BLOCK]]: @@ -837,8 +815,6 @@ define void @sink_after(ptr %a, ptr %b, i64 %n) { ; CHECK-VF4UF2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8 ; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]] ; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-VF4UF2-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-VF4UF2-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8 ; CHECK-VF4UF2-NEXT: [[TMP9:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-VF4UF2-NEXT: [[TMP10:%.*]] = 
mul nuw i32 [[TMP9]], 4 ; CHECK-VF4UF2-NEXT: [[TMP11:%.*]] = sub i32 [[TMP10]], 1 @@ -868,7 +844,7 @@ define void @sink_after(ptr %a, ptr %b, i64 %n) { ; CHECK-VF4UF2-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[TMP26]], i64 [[TMP29]] ; CHECK-VF4UF2-NEXT: store <vscale x 4 x i32> [[TMP24]], ptr [[TMP26]], align 4, !alias.scope [[META20:![0-9]+]], !noalias [[META17]] ; CHECK-VF4UF2-NEXT: store <vscale x 4 x i32> [[TMP25]], ptr [[TMP30]], align 4, !alias.scope [[META20]], !noalias [[META17]] -; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-VF4UF2-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-VF4UF2-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-VF4UF2: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll index 6264941..ce4592c 100644 --- a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll +++ b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll @@ -15,13 +15,11 @@ define void @add_ind64_unrolled(ptr noalias nocapture %a, ptr noalias nocapture ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP8:%.*]] = shl nuw i64 [[TMP4]], 1 ; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 2 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -97,17 +95,15 @@ define void @add_ind64_unrolled_nxv1i64(ptr noalias nocapture %a, ptr noalias no ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 1 +; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 1 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64() ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 1 x i64> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[TMP4]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[TMP2]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <vscale x 1 x i64> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP8:%.*]] = 
getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] @@ -122,7 +118,7 @@ define void @add_ind64_unrolled_nxv1i64(ptr noalias nocapture %a, ptr noalias no ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[TMP13]], i64 [[TMP14]] ; CHECK-NEXT: store <vscale x 1 x i64> [[TMP11]], ptr [[TMP13]], align 8 ; CHECK-NEXT: store <vscale x 1 x i64> [[TMP12]], ptr [[TMP15]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 1 x i64> [[STEP_ADD]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] @@ -185,13 +181,11 @@ define void @add_unique_ind32(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2 ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 ; CHECK-NEXT: [[IND_END:%.*]] = shl i32 [[DOTCAST]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() ; CHECK-NEXT: [[TMP7:%.*]] = shl <vscale x 4 x i32> [[TMP6]], splat (i32 1) -; CHECK-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP5]] to i32 +; CHECK-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP3]] to i32 ; CHECK-NEXT: [[TMP9:%.*]] = shl i32 [[TMP8]], 1 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP9]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer @@ -201,7 +195,7 @@ define void @add_unique_ind32(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] ; CHECK-NEXT: store <vscale x 4 x i32> [[VEC_IND]], ptr [[TMP10]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] @@ -262,8 +256,6 @@ define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 2 ; CHECK-NEXT: [[DOTCAST:%.*]] = sitofp i64 [[N_VEC]] to float ; CHECK-NEXT: [[TMP4:%.*]] = fmul float [[DOTCAST]], 2.000000e+00 ; CHECK-NEXT: [[IND_END:%.*]] = fadd float [[TMP4]], 0.000000e+00 @@ -271,7 +263,7 @@ define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: [[TMP8:%.*]] = uitofp <vscale x 4 x i32> [[TMP7]] to <vscale x 4 x float> ; CHECK-NEXT: [[TMP9:%.*]] = fmul <vscale x 4 x float> [[TMP8]], splat (float 2.000000e+00) ; CHECK-NEXT: [[INDUCTION:%.*]] = fadd <vscale x 4 x float> [[TMP9]], zeroinitializer -; CHECK-NEXT: [[TMP12:%.*]] = uitofp i64 
[[TMP6]] to float +; CHECK-NEXT: [[TMP12:%.*]] = uitofp i64 [[TMP3]] to float ; CHECK-NEXT: [[TMP13:%.*]] = fmul float [[TMP12]], 2.000000e+00 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[TMP13]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x float> [[DOTSPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer @@ -281,7 +273,7 @@ define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] ; CHECK-NEXT: store <vscale x 4 x float> [[VEC_IND]], ptr [[TMP14]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = fadd <vscale x 4 x float> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/scalable-iv-outside-user.ll b/llvm/test/Transforms/LoopVectorize/scalable-iv-outside-user.ll index 1ec2993..0467a58 100644 --- a/llvm/test/Transforms/LoopVectorize/scalable-iv-outside-user.ll +++ b/llvm/test/Transforms/LoopVectorize/scalable-iv-outside-user.ll @@ -12,13 +12,11 @@ define i32 @iv_live_out_wide(ptr %dst) { ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 2000, [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 2000, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 2000, [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 2 ; CHECK-NEXT: [[TMP6:%.*]] = mul i32 [[TMP5]], 2 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 2000, [[TMP6]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 2000, [[N_MOD_VF]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[STEP_2]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32() diff --git a/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll b/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll index 7aac9d1..bf14c87 100644 --- a/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll +++ b/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll @@ -10,7 +10,7 @@ define void @test(ptr %d) { ; CHECK-SAME: (ptr [[D:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[ARR:%.*]] = alloca [1024 x i32], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]]) ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 128, [[TMP1]] @@ -20,18 +20,16 @@ define void @test(ptr %d) { ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 128, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 128, [[N_MOD_VF]] -; 
CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]]) -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDEX]] -; CHECK-NEXT: store <vscale x 2 x i32> splat (i32 100), ptr [[TMP6]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]]) +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDEX]] +; CHECK-NEXT: store <vscale x 2 x i32> splat (i32 100), ptr [[TMP4]], align 8 +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 128, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] @@ -40,39 +38,39 @@ define void @test(ptr %d) { ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]]) ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX]], align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: store i32 100, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]]) ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: for.end: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]]) ; CHECK-NEXT: ret void ; entry: %arr = alloca [1024 x i32], align 16 - call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1 + call void @llvm.lifetime.start.p0(ptr %arr) #1 br label %for.body for.body: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] - call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1 + call void @llvm.lifetime.end.p0(ptr %arr) #1 %arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv %0 = load i32, ptr %arrayidx, align 8 store i32 100, ptr %arrayidx, align 8 - call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1 + call void @llvm.lifetime.start.p0(ptr %arr) #1 %indvars.iv.next = add i64 %indvars.iv, 1 %lftr.wideiv = trunc i64 %indvars.iv.next to i32 %exitcond = icmp ne i32 %lftr.wideiv, 128 br i1 %exitcond, label %for.body, label %for.end, !llvm.loop !0 
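; ---------------------------------------------------------------------------
; Annotation, not part of the original test: the mechanical change applied
; throughout this patch drops the i64 size operand from the lifetime
; intrinsics, as in the rewritten calls just above. The declarations migrate
; from
;   declare void @llvm.lifetime.start.p0(i64, ptr nocapture)   ; old: explicit size
;   declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
; to
;   declare void @llvm.lifetime.start.p0(ptr nocapture)        ; new: pointer only
;   declare void @llvm.lifetime.end.p0(ptr nocapture)
; so a call such as `call void @llvm.lifetime.end.p0(i64 4096, ptr %arr)` is
; rewritten to `call void @llvm.lifetime.end.p0(ptr %arr)`, the marked region
; being understood as the entire alloca. (Kept as comments here because
; declarations would be invalid inside a function body.)
; ---------------------------------------------------------------------------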
 for.end:
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
 ret void
 }
@@ -90,18 +88,16 @@ define void @testloopvariant(ptr %d) {
 ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 128, [[TMP3]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 128, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x i32> splat (i32 100), ptr [[TMP6]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDEX]]
+; CHECK-NEXT: store <vscale x 2 x i32> splat (i32 100), ptr [[TMP4]], align 8
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 128, [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -110,12 +106,12 @@ define void @testloopvariant(ptr %d) {
 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
 ; CHECK: for.body:
 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [1024 x i32], ptr [[ARR]], i32 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr [1024 x i32], ptr [[ARR]], i32 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 8
 ; CHECK-NEXT: store i32 100, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128
@@ -130,11 +126,11 @@ entry:
 for.body:
 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
 %0 = getelementptr [1024 x i32], ptr %arr, i32 0, i64 %indvars.iv
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
 %arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv
 %1 = load i32, ptr %arrayidx, align 8
 store i32 100, ptr %arrayidx, align 8
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
 %indvars.iv.next = add i64 %indvars.iv, 1
 %lftr.wideiv = trunc i64 %indvars.iv.next to i32
 %exitcond = icmp ne i32 %lftr.wideiv, 128
@@ -144,9 +140,9 @@ for.end:
 ret void
 }

-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1

-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1

 !0 = distinct !{!0, !1}
 !1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll b/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll
index f384d3c..901f228 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll
@@ -12,8 +12,6 @@
 ; CHECKUF1-DAG: %[[VSCALEX4:.*]] = shl nuw i64 %[[VSCALE]], 2
 ; CHECKUF1-DAG: %n.mod.vf = urem i64 %wide.trip.count, %[[VSCALEX4]]
 ; CHECKUF1: %n.vec = sub nsw i64 %wide.trip.count, %n.mod.vf
-; CHECKUF1: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF1: %[[VSCALEX4:.*]] = shl nuw i64 %[[VSCALE]], 2

 ; CHECKUF1: vector.body:
 ; CHECKUF1: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
@@ -41,8 +39,6 @@
 ; CHECKUF2-DAG: %[[VSCALEX8:.*]] = shl nuw i64 %[[VSCALE]], 3
 ; CHECKUF2-DAG: %n.mod.vf = urem i64 %wide.trip.count, %[[VSCALEX8]]
 ; CHECKUF2: %n.vec = sub nsw i64 %wide.trip.count, %n.mod.vf
-; CHECKUF2: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF2: %[[VSCALEX8:.*]] = shl nuw i64 %[[VSCALE]], 3

 ; CHECKUF2: vector.body:
 ; CHECKUF2: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-predication.ll b/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
index a3a4c29..ffa2602 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
@@ -19,13 +19,11 @@ define void @foo(i32 %val, ptr dereferenceable(1024) %ptr) {
 ; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 256, [[TMP2]]
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 256)
-; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]]
 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll
index ba337aa..c2ae92e 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll
@@ -15,8 +15,6 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) {
 ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 16
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 256, [[TMP3]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 256, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 16
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -38,7 +36,7 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) {
 ; CHECK-NEXT: [[TMP35:%.*]] = trunc <vscale x 8 x i32> [[TMP29]] to <vscale x 8 x i8>
 ; CHECK-NEXT: [[TMP34]] = zext <vscale x 8 x i8> [[TMP33]] to <vscale x 8 x i32>
 ; CHECK-NEXT: [[TMP36]] = zext <vscale x 8 x i8> [[TMP35]] to <vscale x 8 x i32>
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
 ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll b/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
index 4b8ff86..4495ed6 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
@@ -13,8 +13,6 @@ define void @trunc_minimal_bitwidth(ptr %bptr, ptr noalias %hptr, i32 %val, i64
 ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0
 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[BROADCAST_SPLAT]] to <vscale x 4 x i16>
@@ -23,7 +21,7 @@ define void @trunc_minimal_bitwidth(ptr %bptr, ptr noalias %hptr, i32 %val, i64
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[HPTR:%.*]], i64 [[INDEX]]
 ; CHECK-NEXT: store <vscale x 4 x i16> [[TMP4]], ptr [[TMP5]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: middle.block:
@@ -73,8 +71,6 @@ define void @trunc_minimal_bitwidths_shufflevector (ptr %p, i32 %arg1, i64 %len)
 ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[LEN]], [[TMP3]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[LEN]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[ARG1:%.*]], i64 0
 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[BROADCAST_SPLAT]] to <vscale x 4 x i8>
@@ -86,7 +82,7 @@ define void @trunc_minimal_bitwidths_shufflevector (ptr %p, i32 %arg1, i64 %len)
 ; CHECK-NEXT: [[TMP6:%.*]] = xor <vscale x 4 x i8> [[WIDE_LOAD]], [[TMP4]]
 ; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i8> [[TMP6]], [[WIDE_LOAD]]
 ; CHECK-NEXT: store <vscale x 4 x i8> [[TMP7]], ptr [[TMP5]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
index 983f327..c86a404 100644
--- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
+++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
@@ -152,9 +152,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
 ; VF8UF1-NEXT: [[TMP0:%.*]] = add nsw i64 [[N]], -2
 ; VF8UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; VF8UF1: [[VECTOR_PH]]:
-; VF8UF1-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 7
-; VF8UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8
-; VF8UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
 ; VF8UF1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP0]], 1
 ; VF8UF1-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
 ; VF8UF1-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer
@@ -239,9 +236,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
 ; VF8UF2-NEXT: [[TMP0:%.*]] = add nsw i64 [[N]], -2
 ; VF8UF2-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; VF8UF2: [[VECTOR_PH]]:
-; VF8UF2-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 15
-; VF8UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16
-; VF8UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
 ; VF8UF2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP0]], 1
 ; VF8UF2-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
 ; VF8UF2-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer
@@ -383,9 +377,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
 ; VF16UF1-NEXT: [[TMP0:%.*]] = add nsw i64 [[N]], -2
 ; VF16UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; VF16UF1: [[VECTOR_PH]]:
-; VF16UF1-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 15
-; VF16UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16
-; VF16UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
 ; VF16UF1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP0]], 1
 ; VF16UF1-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
 ; VF16UF1-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT1]], <16 x i64> poison, <16 x i32> zeroinitializer
@@ -701,9 +692,6 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
 ; VF8UF1-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
 ; VF8UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; VF8UF1: [[VECTOR_PH]]:
-; VF8UF1-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], 7
-; VF8UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8
-; VF8UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
 ; VF8UF1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP1]], 1
 ; VF8UF1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
 ; VF8UF1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
@@ -817,9 +805,6 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
 ; VF8UF2-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
 ; VF8UF2-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; VF8UF2: [[VECTOR_PH]]:
-; VF8UF2-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], 15
-; VF8UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16
-; VF8UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
 ; VF8UF2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP1]], 1
 ; VF8UF2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
 ; VF8UF2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
@@ -1014,9 +999,6 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
 ; VF16UF1-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
 ; VF16UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; VF16UF1: [[VECTOR_PH]]:
-; VF16UF1-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], 15
-; VF16UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16
-; VF16UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
 ; VF16UF1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP1]], 1
 ; VF16UF1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
 ; VF16UF1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll
index d3f7794..4af9767 100644
--- a/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll
+++ b/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll
@@ -45,8 +45,6 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
 ; NO-VP-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP1]], 4
 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP14]]
 ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP2]], 4
 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
 ; NO-VP: vector.body:
 ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -57,7 +55,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
 ; NO-VP-NEXT: [[TMP16:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
 ; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
 ; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP16]], ptr [[TMP9]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP15]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]]
 ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; NO-VP: middle.block:
@@ -90,7 +88,6 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
 ; NO-VP-DEF-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
 ; NO-VP-DEF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP1]]
 ; NO-VP-DEF-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-DEF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; NO-VP-DEF-NEXT: br label [[VECTOR_BODY:%.*]]
 ; NO-VP-DEF: vector.body:
 ; NO-VP-DEF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -101,7 +98,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
 ; NO-VP-DEF-NEXT: [[TMP8:%.*]] = add nsw <vscale x 1 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
 ; NO-VP-DEF-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
 ; NO-VP-DEF-NEXT: store <vscale x 1 x i32> [[TMP8]], ptr [[TMP9]], align 4
-; NO-VP-DEF-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; NO-VP-DEF-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
 ; NO-VP-DEF-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; NO-VP-DEF-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; NO-VP-DEF: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-iv-transforms.ll b/llvm/test/Transforms/LoopVectorize/vplan-iv-transforms.ll
index 128594c..e779233 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-iv-transforms.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-iv-transforms.ll
@@ -89,8 +89,8 @@ define void @iv_expand(ptr %p, i64 %n) {
 ; CHECK-NEXT: Successor(s): middle.block
 ; CHECK: VPlan 'Final VPlan for VF={8},UF={1}'
 ; CHECK: ir-bb<vector.ph>:
-; CHECK-NEXT: IR %n.mod.vf = urem i64 %n, 8
-; CHECK-NEXT: IR %n.vec = sub i64 %n, %n.mod.vf
+; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%n>, ir<8>
+; CHECK-NEXT: EMIT vp<%n.vec> = sub ir<%n>, vp<%n.mod.vf>
 ; CHECK-NEXT: EMIT vp<[[STEP_VECTOR:%.+]]> = step-vector
 ; CHECK-NEXT: EMIT vp<[[BROADCAST_0:%.+]]> = broadcast ir<0>
 ; CHECK-NEXT: EMIT vp<[[BROADCAST_1:%.+]]> = broadcast ir<1>
@@ -109,7 +107,7 @@ define void @iv_expand(ptr %p, i64 %n) {
 ; CHECK-NEXT: WIDEN store ir<%q>, ir<%y>
 ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[SCALAR_PHI]]>, ir<8>
 ; CHECK-NEXT: EMIT vp<%vec.ind.next> = add ir<%iv>, vp<[[BROADCAST_INC]]>
-; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, ir<%n.vec>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%n.vec>
 ; CHECK-NEXT: Successor(s): middle.block, vector.body
 entry:
 br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-predicate-switch.ll b/llvm/test/Transforms/LoopVectorize/vplan-predicate-switch.ll
index 46f9125..49d87a2 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-predicate-switch.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-predicate-switch.ll
@@ -5,16 +5,15 @@ define void @switch4_default_common_dest_with_case(ptr %start, ptr %end) {
 ; CHECK: VPlan 'Final VPlan for VF={2},UF={1}' {
 ; CHECK-NEXT: Live-in ir<[[VF:.+]]> = VF
 ; CHECK-NEXT: Live-in ir<[[VFxUF:.+]]> = VF * UF
-; CHECK-NEXT: Live-in ir<[[VTC:%.+]]> = vector-trip-count
 ; CHECK-NEXT: ir<%0> = original trip-count
 ; CHECK-EMPTY:
 ; CHECK-NEXT: ir-bb<entry>:
 ; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.ph>
 ; CHECK-EMPTY:
 ; CHECK-NEXT: ir-bb<vector.ph>:
-; CHECK-NEXT: IR %n.mod.vf = urem i64 %0, 2
-; CHECK-NEXT: IR %n.vec = sub i64 %0, %n.mod.vf
-; CHECK-NEXT: vp<[[END:%.+]]> = DERIVED-IV ir<%start> + ir<%n.vec> * ir<1>
+; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%0>, ir<2>
+; CHECK-NEXT: EMIT vp<[[VTC:%.+]]> = sub ir<%0>, vp<%n.mod.vf>
+; CHECK-NEXT: vp<[[END:%.+]]> = DERIVED-IV ir<%start> + vp<[[VTC]]> * ir<1>
 ; CHECK-NEXT: Successor(s): vector.body
 ; CHECK-EMPTY:
 ; CHECK-NEXT: vector.body:
@@ -78,11 +77,11 @@ define void @switch4_default_common_dest_with_case(ptr %start, ptr %end) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT: default.2:
 ; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV]]>, ir<[[VFxUF]]>
-; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, ir<[[VTC]]>
+; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VTC]]>
 ; CHECK-NEXT: Successor(s): middle.block, vector.body
 ; CHECK-EMPTY:
 ; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT vp<[[MIDDLE_CMP:%.+]]> = icmp eq ir<%0>, ir<[[VTC]]>
+; CHECK-NEXT: EMIT vp<[[MIDDLE_CMP:%.+]]> = icmp eq ir<%0>, vp<[[VTC]]>
 ; CHECK-NEXT: EMIT branch-on-cond vp<[[MIDDLE_CMP]]>
 ; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph>
 ; CHECK-EMPTY:
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
index d856387..d200359 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
@@ -62,18 +62,18 @@ define void @test_tc_less_than_16(ptr %A, i64 %N) {
 ;
 ; CHECK: Executing best plan with VF=8, UF=2
 ; CHECK-NEXT: VPlan 'Final VPlan for VF={8},UF={2}' {
-; CHECK-NEXT: Live-in ir<[[VTC:%.+]]> = vector-trip-count
-; CHECK-NEXT: ir<%and> = original trip-count
+; CHECK-NEXT: Live-in ir<16> = VF * UF
+; CHECK-NEXT: Live-in ir<%and> = original trip-count
 ; CHECK-EMPTY:
 ; CHECK-NEXT: ir-bb<entry>:
 ; CHECK-NEXT: IR %and = and i64 %N, 15
 ; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.ph>
 ; CHECK-EMPTY:
 ; CHECK-NEXT: ir-bb<vector.ph>:
-; CHECK-NEXT: IR %n.mod.vf = urem i64 %and, 16
-; CHECK-NEXT: IR %n.vec = sub i64 %and, %n.mod.vf
-; CHECK-NEXT: vp<[[END1:%.+]]> = DERIVED-IV ir<%and> + ir<[[VTC]]> * ir<-1>
-; CHECK-NEXT: vp<[[END2:%.+]]> = DERIVED-IV ir<%A> + ir<[[VTC]]> * ir<1>
+; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%and>, ir<16>
+; CHECK-NEXT: EMIT vp<[[VTC:%.+]]> = sub ir<%and>, vp<%n.mod.vf>
+; CHECK-NEXT: vp<[[END1:%.+]]> = DERIVED-IV ir<%and> + vp<[[VTC]]> * ir<-1>
+; CHECK-NEXT: vp<[[END2:%.+]]> = DERIVED-IV ir<%A> + vp<[[VTC]]> * ir<1>
 ; CHECK-NEXT: Successor(s): vector.body
 ; CHECK-EMPTY:
 ; CHECK-NEXT: vector.body:
@@ -88,7 +88,7 @@ define void @test_tc_less_than_16(ptr %A, i64 %N) {
 ; CHECK-NEXT: Successor(s): middle.block
 ; CHECK-EMPTY:
 ; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT vp<[[C:%.+]]> = icmp eq ir<%and>, ir<[[VTC]]>
+; CHECK-NEXT: EMIT vp<[[C:%.+]]> = icmp eq ir<%and>, vp<[[VTC]]>
 ; CHECK-NEXT: EMIT branch-on-cond vp<[[C]]>
 ; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph>
 ; CHECK-EMPTY:
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
index 7cc8458..612c96c 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
@@ -73,7 +73,7 @@ define void @lifetime_for_first_arg_before_multiply(ptr noalias %B, ptr noalias
 ; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
 ; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
 ; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -81,7 +81,7 @@ entry:
 call void @init(ptr %A)
 %a = load <4 x double>, ptr %A, align 8
 %b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
 %c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
 store <4 x double> %c, ptr %C, align 8
 ret void
@@ -154,7 +154,7 @@ define void @lifetime_for_second_arg_before_multiply(ptr noalias %A, ptr noalias
 ; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
 ; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
 ; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -162,7 +162,7 @@ entry:
 call void @init(ptr %B)
 %a = load <4 x double>, ptr %A, align 8
 %b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
 %c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
 store <4 x double> %c, ptr %C, align 8
 ret void
@@ -236,7 +236,7 @@ define void @lifetime_for_first_arg_before_multiply_load_from_offset(ptr noalias
 ; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
 ; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
 ; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -245,7 +245,7 @@ entry:
 %gep.8 = getelementptr i8, ptr %A, i64 8
 %a = load <4 x double>, ptr %gep.8, align 8
 %b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
 %c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
 store <4 x double> %c, ptr %C, align 8
 ret void
@@ -332,7 +332,7 @@ entry:
 br i1 %c.0, label %then, label %exit

 then:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
 br label %exit

 exit:
@@ -422,7 +422,7 @@ entry:
 br i1 %c.0, label %then, label %exit

 then:
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
 br label %exit

 exit:
@@ -442,8 +442,8 @@ define void @multiple_unrelated_lifetimes(ptr noalias %C, i1 %c.0) {
 ; CHECK-NEXT: call void @init(ptr [[B]])
 ; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK: then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ALLOC_1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ALLOC_2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOC_1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOC_2]])
 ; CHECK-NEXT: br label [[EXIT]]
 ; CHECK: exit:
 ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
@@ -522,10 +522,10 @@ entry:
 br i1 %c.0, label %then, label %exit

 then:
- call void @llvm.lifetime.end(i64 -1, ptr %B)
- call void @llvm.lifetime.end(i64 -1, ptr %alloc.1)
- call void @llvm.lifetime.end(i64 -1, ptr %A)
- call void @llvm.lifetime.end(i64 -1, ptr %alloc.2)
+ call void @llvm.lifetime.end(ptr %B)
+ call void @llvm.lifetime.end(ptr %alloc.1)
+ call void @llvm.lifetime.end(ptr %A)
+ call void @llvm.lifetime.end(ptr %alloc.2)
 br label %exit

 exit:
@@ -607,8 +607,8 @@ define void @lifetimes_for_args_in_different_blocks(ptr noalias %C, i1 %c.0) {
 ; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
 ; CHECK-NEXT: br label [[EXIT]]
 ; CHECK: exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -626,8 +626,8 @@ then:
 br label %exit

 exit:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %A)
+ call void @llvm.lifetime.end(ptr %B)
 ret void
 }

@@ -640,8 +640,8 @@ define void @lifetimes_for_args_in_different_blocks2(ptr noalias %C, i1 %c.0) {
 ; CHECK-NEXT: call void @init(ptr [[B]])
 ; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK: then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
 ; CHECK-NEXT: br label [[EXIT]]
 ; CHECK: exit:
 ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
@@ -716,8 +716,8 @@ entry:
 br i1 %c.0, label %then, label %exit

 then:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %A)
+ call void @llvm.lifetime.end(ptr %B)
 br label %exit

 exit:
@@ -809,7 +809,7 @@ entry:
 call void @init(ptr %A)
 call void @init(ptr %B)
 %a = load <4 x double>, ptr %A, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
 br i1 %c.0, label %then, label %exit

 then:
@@ -819,7 +819,7 @@ then:
 br label %exit

 exit:
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
 ret void
 }

@@ -904,7 +904,7 @@ entry:
 call void @init(ptr %A)
 call void @init(ptr %B)
 %b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
 br i1 %c.0, label %then, label %exit

 then:
@@ -914,11 +914,11 @@ then:
 br label %exit

 exit:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
 ret void
 }

 declare void @init(ptr)
-declare void @llvm.lifetime.end(i64, ptr)
+declare void @llvm.lifetime.end(ptr)

 declare <4 x double> @llvm.matrix.multiply(<4 x double>, <4 x double>, i32, i32, i32)
diff --git a/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll b/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll
index 87ff922..f7e84274 100644
--- a/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll
+++ b/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll
@@ -10,6 +10,6 @@ define amdgpu_kernel void @addressspace_alloca() {
 ; CHECK-NEXT: ret void
 ;
 %alloca = alloca i8, align 8, addrspace(5)
- call void @llvm.lifetime.start(i64 2, ptr addrspace(5) %alloca)
+ call void @llvm.lifetime.start(ptr addrspace(5) %alloca)
 ret void
 }
diff --git a/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll b/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll
index d4bc097..a876319 100644
--- a/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll
+++ b/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll
@@ -2,8 +2,8 @@ ; RUN: opt -passes=mem2reg -S -o - < %s | FileCheck %s

 declare void @llvm.assume(i1)
-declare void @llvm.lifetime.start.p0(i64 %size, ptr nocapture %ptr)
-declare void @llvm.lifetime.end.p0(i64 %size, ptr nocapture %ptr)
+declare void @llvm.lifetime.start.p0(ptr nocapture %ptr)
+declare void @llvm.lifetime.end.p0(ptr nocapture %ptr)

 define void @positive_assume_uses(ptr %arg) {
 ; CHECK-LABEL: @positive_assume_uses(
@@ -54,10 +54,10 @@ define void @positive_gep_assume_uses() {
 ;
 %A = alloca {i8, i16}
 %B = getelementptr {i8, i16}, ptr %A, i32 0, i32 0
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
 call void @llvm.assume(i1 true) ["align"(ptr %B, i64 8), "align"(ptr %B, i64 16)]
 store {i8, i16} zeroinitializer, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
 call void @llvm.assume(i1 true) ["nonnull"(ptr %B), "align"(ptr %B, i64 2)]
 ret void
 }
@@ -70,10 +70,10 @@ define void @positive_mixed_assume_uses() {
 ; CHECK-NEXT: ret void
 ;
 %A = alloca i8
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
 call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 8), "align"(ptr %A, i64 16)]
 store i8 1, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
 call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)]
 call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)]
 ret void
diff --git a/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll b/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll
index bcc9693..510fb2b 100644
--- a/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll
+++ b/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll
@@ -1,15 +1,15 @@
 ; RUN: opt -passes=mem2reg -S -o - < %s | FileCheck %s

-declare void @llvm.lifetime.start.p0(i64 %size, ptr nocapture %ptr)
-declare void @llvm.lifetime.end.p0(i64 %size, ptr nocapture %ptr)
+declare void @llvm.lifetime.start.p0(ptr nocapture %ptr)
+declare void @llvm.lifetime.end.p0(ptr nocapture %ptr)

 define void @test1() {
 ; CHECK: test1
 ; CHECK-NOT: alloca
 %A = alloca i32
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
 store i32 1, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
 ret void
 }

@@ -17,8 +17,8 @@ define void @test2() {
 ; CHECK: test2
 ; CHECK-NOT: alloca
 %A = alloca {i8, i16}
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
 store {i8, i16} zeroinitializer, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
 ret void
 }
diff --git a/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll b/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll
index 601498e..a0c0e9f 100644
--- a/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll
+++ b/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll
@@ -5,7 +5,7 @@ declare void @use(ptr)
 ; Make sure callslot optimization merges alias.scope metadata correctly when it merges instructions.
 ; Merging here naively generates:
 ; call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %src, i64 1, i1 false), !alias.scope !3
-; call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %src), !noalias !0
+; call void @llvm.lifetime.end.p0(ptr nonnull %src), !noalias !0
 ; ...
 ; !0 = !{!1}
 ; !1 = distinct !{!1, !2, !"callee1: %a"}
@@ -20,18 +20,18 @@ define i8 @test(i8 %input) {
 %src = alloca i8
 ; NOTE: we're matching the full line and looking for the lack of !alias.scope here
 ; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %src, i64 1, i1 false)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %src), !noalias !3
+ call void @llvm.lifetime.start.p0(ptr nonnull %src), !noalias !3
 store i8 %input, ptr %src
 call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 1, i1 false), !alias.scope !0
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %src), !noalias !3
+ call void @llvm.lifetime.end.p0(ptr nonnull %src), !noalias !3
 call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 1, i1 false), !alias.scope !3
 %ret_value = load i8, ptr %dst
 call void @use(ptr %src)
 ret i8 %ret_value
 }

-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)

 !0 = !{!1}
diff --git a/llvm/test/Transforms/MemCpyOpt/capturing-func.ll b/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
index 47c4358..c08f60a 100644
--- a/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
+++ b/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
@@ -5,8 +5,8 @@ target datalayout = "e"
 declare void @foo(ptr)
 declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)

 ; Check that the transformation isn't applied if the called function can
 ; capture the pointer argument (i.e. the nocapture attribute isn't present)
@@ -51,18 +51,18 @@ define void @test_lifetime_end() {
 ; CHECK-LABEL: define {{[^@]+}}@test_lifetime_end() {
 ; CHECK-NEXT: [[PTR1:%.*]] = alloca i8, align 1
 ; CHECK-NEXT: [[PTR2:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[PTR2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTR2]])
 ; CHECK-NEXT: call void @foo(ptr [[PTR1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[PTR2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR2]])
 ; CHECK-NEXT: call void @foo(ptr [[PTR1]])
 ; CHECK-NEXT: ret void
 ;
 %ptr1 = alloca i8
 %ptr2 = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %ptr2)
+ call void @llvm.lifetime.start.p0(ptr %ptr2)
 call void @foo(ptr %ptr2)
 call void @llvm.memcpy.p0.p0.i32(ptr %ptr1, ptr %ptr2, i32 1, i1 false)
- call void @llvm.lifetime.end.p0(i64 1, ptr %ptr2)
+ call void @llvm.lifetime.end.p0(ptr %ptr2)
 call void @foo(ptr %ptr1)
 ret void
 }
diff --git a/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll b/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll
index 0626f09..06d9434 100644
--- a/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll
+++ b/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll
@@ -7,7 +7,7 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 target triple = "x86_64-grtev4-linux-gnu"

 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
 declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)

 define void @test() {
@@ -26,7 +26,7 @@ entry:
 %agg.tmp.sroa.14 = alloca [20 x i8], align 4
 %agg.tmp.sroa.14.128.sroa_idx = getelementptr i8, ptr %agg.tmp.sroa.14, i64 4
 call void @llvm.memset.p0.i64(ptr %agg.tmp.sroa.14.128.sroa_idx, i8 0, i64 1, i1 false)
- call void @llvm.lifetime.start.p0(i64 20, ptr %agg.tmp3.sroa.35)
+ call void @llvm.lifetime.start.p0(ptr %agg.tmp3.sroa.35)
 call void @llvm.memcpy.p0.p0.i64(ptr %agg.tmp3.sroa.35, ptr %agg.tmp.sroa.14, i64 20, i1 false)
 %agg.tmp3.sroa.35.128.sroa_idx = getelementptr i8, ptr %agg.tmp3.sroa.35, i64 4
 call void @llvm.memcpy.p0.p0.i64(ptr inttoptr (i64 4 to ptr), ptr %agg.tmp3.sroa.35.128.sroa_idx, i64 1, i1 false)
diff --git a/llvm/test/Transforms/MemCpyOpt/lifetime.ll b/llvm/test/Transforms/MemCpyOpt/lifetime.ll
index e9fc06b..4eab12a 100644
--- a/llvm/test/Transforms/MemCpyOpt/lifetime.ll
+++ b/llvm/test/Transforms/MemCpyOpt/lifetime.ll
@@ -5,46 +5,46 @@
 ; @llvm.lifetime.start and @llvm.memcpy.
 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)

 define void @call_slot(ptr nocapture dereferenceable(16) %arg1) {
 ; CHECK-LABEL: @call_slot(
 ; CHECK-NEXT: bb:
 ; CHECK-NEXT: [[TMP:%.*]] = alloca [8 x i8], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP]])
 ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 7
 ; CHECK-NEXT: store i8 0, ptr [[TMP10]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP]])
 ; CHECK-NEXT: ret void
 ;
 bb:
 %tmp = alloca [8 x i8], align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %tmp)
+ call void @llvm.lifetime.start.p0(ptr %tmp)
 %tmp10 = getelementptr inbounds i8, ptr %tmp, i64 7
 store i8 0, ptr %tmp10, align 1
 call void @llvm.memcpy.p0.p0.i64(ptr align 8 %arg1, ptr align 8 %tmp, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %tmp)
+ call void @llvm.lifetime.end.p0(ptr %tmp)
 ret void
 }

 define void @memcpy_memcpy_across_lifetime(ptr noalias %p1, ptr noalias %p2, ptr noalias %p3) {
 ; CHECK-LABEL: @memcpy_memcpy_across_lifetime(
 ; CHECK-NEXT: [[A:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[A]], ptr [[P1:%.*]], i64 16, i1 false)
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P1]], ptr [[P2:%.*]], i64 16, i1 false)
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P2]], ptr [[A]], i64 16, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P3:%.*]], ptr [[P2]], i64 16, i1 false)
 ; CHECK-NEXT: ret void
 ;
 %a = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr %p1, i64 16, i1 false)
 call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr %p2, i64 16, i1 false)
 call void @llvm.memcpy.p0.p0.i64(ptr %p2, ptr %a, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
 call void @llvm.memcpy.p0.p0.i64(ptr %p3, ptr %p2, i64 16, i1 false)
 ret void
 }
@@ -55,18 +55,18 @@ define i32 @call_slot_move_lifetime_start() {
 ; CHECK-LABEL: @call_slot_move_lifetime_start(
 ; CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
 ; CHECK-NEXT: [[DST:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
 ; CHECK-NEXT: call void @call(ptr [[DST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DST]])
 ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[DST]], align 4
 ; CHECK-NEXT: ret i32 [[V]]
 ;
 %tmp = alloca i32
 %dst = alloca i32
 call void @call(ptr %tmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %tmp, i64 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.end.p0(ptr %dst)
 %v = load i32, ptr %dst
 ret i32 %v
 }
@@ -76,20 +76,20 @@ define i32 @call_slot_two_lifetime_starts() {
 ; CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
 ; CHECK-NEXT: [[DST:%.*]] = alloca i32, align 4
 ; CHECK-NEXT: call void @call(ptr [[TMP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DST]], ptr align 4 [[TMP]], i64 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DST]])
 ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[DST]], align 4
 ; CHECK-NEXT: ret i32 [[V]]
 ;
 %tmp = alloca i32
 %dst = alloca i32
 call void @call(ptr %tmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %tmp, i64 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.end.p0(ptr %dst)
 %v = load i32, ptr %dst
 ret i32 %v
 }
@@ -100,9 +100,9 @@ define i32 @call_slot_clobber_before_lifetime_start() {
 ; CHECK-NEXT: [[DST:%.*]] = alloca i32, align 4
 ; CHECK-NEXT: call void @call(ptr [[TMP]])
 ; CHECK-NEXT: store i32 0, ptr [[DST]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DST]], ptr align 4 [[TMP]], i64 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DST]])
 ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[DST]], align 4
 ; CHECK-NEXT: ret i32 [[V]]
 ;
@@ -110,9 +110,9 @@ define i32 @call_slot_clobber_before_lifetime_start() {
 %dst = alloca i32
 call void @call(ptr %tmp)
 store i32 0, ptr %dst
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %tmp, i64 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.end.p0(ptr %dst)
 %v = load i32, ptr %dst
 ret i32 %v
 }
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll
index 383040c..e1b32cd 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll
@@ -7,8 +7,8 @@
 declare i1 @check(ptr readonly byval(i64) align 8) readonly argmemonly
 declare void @clobber(ptr) argmemonly

-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)

 ; %a.2's lifetime ends before the call to @check. We must remove the call to
@@ -25,11 +25,11 @@ define i1 @alloca_forwarding_lifetime_end_clobber() {
 entry:
 %a.1 = alloca i64, align 8
 %a.2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
 call void @init(ptr sret(i64) align 8 %a.2)
 store i8 0, ptr %a.2
 call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
- call void @llvm.lifetime.end.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.end.p0(ptr %a.2)
 ;call void @clobber(ptr %a.2)
 %call = call i1 @check(ptr byval(i64) align 8 %a.1)
 ret i1 %call
@@ -42,7 +42,7 @@ define i1 @alloca_forwarding_call_clobber() {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[A_1:%.*]] = alloca i64, align 8
 ; CHECK-NEXT: [[A_2:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A_2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_2]])
 ; CHECK-NEXT: call void @init(ptr sret(i64) align 8 [[A_2]])
 ; CHECK-NEXT: store i8 0, ptr [[A_2]], align 1
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[A_1]], ptr [[A_2]], i64 8, i1 false)
@@ -53,7 +53,7 @@ define i1 @alloca_forwarding_call_clobber() {
 entry:
 %a.1 = alloca i64, align 8
 %a.2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
 call void @init(ptr sret(i64) align 8 %a.2)
 store i8 0, ptr %a.2
 call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
@@ -67,7 +67,7 @@ define i1 @alloca_forwarding_call_clobber_after() {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[A_1:%.*]] = alloca i64, align 8
 ; CHECK-NEXT: [[A_2:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A_2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_2]])
 ; CHECK-NEXT: call void @init(ptr sret(i64) align 8 [[A_2]])
 ; CHECK-NEXT: store i8 0, ptr [[A_2]], align 1
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[A_1]], ptr [[A_2]], i64 8, i1 false)
@@ -78,7 +78,7 @@ define i1 @alloca_forwarding_call_clobber_after() {
 entry:
 %a.1 = alloca i64, align 8
 %a.2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
 call void @init(ptr sret(i64) align 8 %a.2)
 store i8 0, ptr %a.2
 call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
@@ -102,7 +102,7 @@ entry:
 %a.1 = alloca i64, align 8
 %a.2 = alloca i64, align 8
 %a.3 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
 call void @init(ptr sret(i64) align 8 %a.2)
 store i8 0, ptr %a.2
 call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll
index ba6faf3..5e81c0d 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll
@@ -3,8 +3,8 @@
 %struct.MaskedType = type { i8, i8 }

-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
 declare void @MaskedFunction1(ptr, ptr addrspace(1))
 declare void @MaskedFunction2(ptr, ptr)

@@ -13,11 +13,11 @@ define i8 @test_gep_not_modified(ptr %in0, ptr %in1) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[FUNCALLOC:%.*]] = alloca [[STRUCT_MASKEDTYPE:%.*]], align 4
 ; CHECK-NEXT: [[PTRALLOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTRALLOC]])
 ; CHECK-NEXT: [[ADDRSPACECAST:%.*]] = addrspacecast ptr [[PTRALLOC]] to ptr addrspace(1)
 ; CHECK-NEXT: call void @MaskedFunction1(ptr [[IN1:%.*]], ptr addrspace(1) [[ADDRSPACECAST]])
 ; CHECK-NEXT: [[LOAD1:%.*]] = load i8, ptr [[PTRALLOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTRALLOC]])
 ; CHECK-NEXT: [[GETELEMPTR1:%.*]] = getelementptr inbounds [[STRUCT_MASKEDTYPE]], ptr [[FUNCALLOC]], i32 0, i32 1
 ; CHECK-NEXT: store i8 [[LOAD1]], ptr [[GETELEMPTR1]], align 1
 ; CHECK-NEXT: ret i8 0
@@ -25,11 +25,11 @@ entry:
 %funcAlloc = alloca %struct.MaskedType, align 4
 %ptrAlloc = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.start.p0(ptr %ptrAlloc) #0
 %addrspaceCast = addrspacecast ptr %ptrAlloc to ptr addrspace(1)
 call void @MaskedFunction1(ptr %in1, ptr addrspace(1) %addrspaceCast)
 %load1 = load i8, ptr %ptrAlloc, align 1
- call void @llvm.lifetime.end.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.end.p0(ptr %ptrAlloc) #0
 %getElemPtr1 = getelementptr inbounds %struct.MaskedType, ptr %funcAlloc, i32 0, i32 1
 store i8 %load1, ptr %getElemPtr1, align 1
 ret i8 0
@@ -40,19 +40,19 @@ define i8 @test_gep_modified(ptr %in0, ptr %in1) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[FUNCALLOC:%.*]] = alloca [[STRUCT_MASKEDTYPE:%.*]], align 4
 ; CHECK-NEXT: [[PTRALLOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTRALLOC]])
 ; CHECK-NEXT: [[GETELEMPTR1:%.*]] = getelementptr inbounds [[STRUCT_MASKEDTYPE]], ptr [[FUNCALLOC]], i32 0, i32 1
 ; CHECK-NEXT: call void @MaskedFunction2(ptr [[IN1:%.*]], ptr [[GETELEMPTR1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTRALLOC]])
 ; CHECK-NEXT: ret i8 0
 ;
 entry:
 %funcAlloc = alloca %struct.MaskedType, align 4
 %ptrAlloc = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.start.p0(ptr %ptrAlloc) #0
 call void @MaskedFunction2(ptr %in1, ptr %ptrAlloc)
 %load1 = load i8, ptr %ptrAlloc, align 1
- call void @llvm.lifetime.end.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.end.p0(ptr %ptrAlloc) #0
 %getElemPtr1 = getelementptr inbounds %struct.MaskedType, ptr %funcAlloc, i32 0, i32 1
 store i8 %load1, ptr %getElemPtr1, align 1
 ret i8 0
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
index 1771fe6..7a7f8e1 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
@@ -12,10 +12,10 @@ define void @foo(ptr noalias nocapture sret([8 x i64]) dereferenceable(64) %sret
 ;
 entry-block:
 %a = alloca [8 x i64], align 8
- call void @llvm.lifetime.start.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 call void @llvm.memset.p0.i64(ptr align 8 %a, i8 0, i64 64, i1 false)
 call void @llvm.memcpy.p0.p0.i64(ptr align 8 %sret, ptr align 8 %a, i64 64, i1 false)
- call void @llvm.lifetime.end.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
 ret void
 }

@@ -24,28 +24,28 @@ define void @bar(ptr noalias nocapture sret([8 x i64]) dereferenceable(64) %sret
 ; CHECK-LABEL: @bar(
 ; CHECK-NEXT: entry-block:
 ; CHECK-NEXT: [[A:%.*]] = alloca [8 x i64], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(64) [[A]], i8 0, i64 64, i1 false)
 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(64) [[SRET:%.*]], i8 0, i64 64, i1 false)
 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(32) [[A]], i8 42, i64 32, i1 false)
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 dereferenceable(64) [[OUT:%.*]], ptr noundef nonnull align 8 dereferenceable(64) [[A]], i64 64, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
 ; CHECK-NEXT: ret void
 ;
 entry-block:
 %a = alloca [8 x i64], align 8
- call void @llvm.lifetime.start.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 call void @llvm.memset.p0.i64(ptr align 8 %a, i8 0, i64 64, i1 false)
 call void @llvm.memcpy.p0.p0.i64(ptr align 8 %sret, ptr align 8 %a, i64 64, i1 false)
 call void @llvm.memset.p0.i64(ptr align 8 %a, i8 42, i64 32, i1 false)
 call void @llvm.memcpy.p0.p0.i64(ptr align 8 %out, ptr align 8 %a, i64 64, i1 false)
- call void @llvm.lifetime.end.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
 ret void
 }

-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind

 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) nounwind
 declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
index 84253dc..6e28811 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
@@ -29,11 +29,11 @@ define i32 @test1(ptr nocapture %foobie) nounwind noinline ssp uwtable {
 define void @test2(ptr sret(i8) noalias nocapture %out) nounwind noinline ssp uwtable {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT: [[IN:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[IN]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[IN]])
 ; CHECK-NEXT: ret void
 ;
 %in = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %in)
+ call void @llvm.lifetime.start.p0(ptr %in)
 call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %in, i64 8, i1 false)
 ret void
 }
@@ -42,12 +42,12 @@ define void @test2(ptr sret(i8) noalias nocapture %out) nounwind noinline ssp uw
 define void @test_lifetime_may_alias(ptr %src, ptr %dst) {
 ; CHECK-LABEL: @test_lifetime_may_alias(
 ; CHECK-NEXT: [[LIFETIME:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[LIFETIME]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LIFETIME]])
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 8, i1 false)
 ; CHECK-NEXT: ret void
 ;
 %lifetime = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %lifetime)
+ call void @llvm.lifetime.start.p0(ptr %lifetime)
 call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 8, i1 false)
 ret void
 }
@@ -56,12 +56,12 @@ define void @test_lifetime_may_alias(ptr %src, ptr %dst) {
 define void @test_lifetime_partial_alias_1(ptr noalias %dst) {
 ; CHECK-LABEL: @test_lifetime_partial_alias_1(
 ; CHECK-NEXT: [[A:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 8
 ; CHECK-NEXT: ret void
 ;
 %a = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 %gep = getelementptr i8, ptr %a, i64 8
 call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %gep, i64 8, i1 false)
 ret void
@@ -71,12 +71,12 @@ define void @test_lifetime_partial_alias_1(ptr noalias %dst) {
 define void @test_lifetime_partial_alias_2(ptr noalias %dst) {
 ; CHECK-LABEL: @test_lifetime_partial_alias_2(
 ; CHECK-NEXT: [[A:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 8
 ; CHECK-NEXT: ret void
 ;
 %a = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 %gep = getelementptr i8, ptr %a, i64 8
 call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %gep, i64 16, i1 false)
 ret void
@@ -84,4 +84,4 @@ define void @test_lifetime_partial_alias_2(ptr noalias %dst) {

 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind

-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
diff --git a/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll b/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
index 343f951..2575d58 100644
--- a/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
@@ -23,17 +23,17 @@ define void @test_alloca(ptr %result) {
 define void @test_alloca_with_lifetimes(ptr %result) {
 ; CHECK-LABEL: @test_alloca_with_lifetimes(
 ; CHECK-NEXT: [[A:%.*]] = alloca [[T:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[A]], i8 0, i64 12, i1 false)
 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[RESULT:%.*]], i8 0, i64 12, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
 ; CHECK-NEXT: ret void
 ;
 %a = alloca %T, align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
 call void @llvm.memset.p0.i64(ptr align 8 %a, i8 0, i64 12, i1 false)
 call void @llvm.memcpy.p0.p0.i64(ptr %result, ptr align 8 %a, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
 ret void
 }

@@ -201,5 +201,5 @@ declare void @free(ptr)

 declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/MemCpyOpt/pr29105.ll b/llvm/test/Transforms/MemCpyOpt/pr29105.ll
index d47bddd..f4538b9 100644
--- a/llvm/test/Transforms/MemCpyOpt/pr29105.ll
+++ b/llvm/test/Transforms/MemCpyOpt/pr29105.ll
@@ -7,34 +7,34 @@ define void @baz() unnamed_addr #0 {
 ; CHECK-LABEL: @baz(
 ; CHECK-NEXT: entry-block:
 ; CHECK-NEXT: [[TMP2:%.*]] = alloca [[FOO:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16384, ptr nonnull [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP2]])
 ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(16384) [[TMP2]], i8 0, i64 16384, i1 false)
 ; CHECK-NEXT: call void @bar(ptr noalias nonnull captures(none) dereferenceable(16384) [[TMP2]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16384, ptr nonnull [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP2]])
 ; CHECK-NEXT: ret void
 ;
 entry-block:
 %x.sroa.0 = alloca [2048 x i64], align 8
 %tmp0 = alloca [2048 x i64], align 8
 %tmp2 = alloca %Foo, align 8
- call void @llvm.lifetime.start.p0(i64 16384, ptr %x.sroa.0)
- call void @llvm.lifetime.start.p0(i64 16384, ptr %tmp0)
+ call void @llvm.lifetime.start.p0(ptr %x.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr %tmp0)
 call void @llvm.memset.p0.i64(ptr align 8 %tmp0, i8 0, i64 16384, i1 false)
 call void @llvm.memcpy.p0.p0.i64(ptr align 8 %x.sroa.0, ptr align 8 %tmp0, i64 16384, i1 false)
- call void @llvm.lifetime.end.p0(i64 16384, ptr %tmp0)
- call void @llvm.lifetime.start.p0(i64 16384, ptr %tmp2)
+ call void @llvm.lifetime.end.p0(ptr %tmp0)
+ call void @llvm.lifetime.start.p0(ptr %tmp2)
 call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp2, ptr align 8 %x.sroa.0, i64 16384, i1 false)
 call void @bar(ptr noalias nocapture nonnull dereferenceable(16384) %tmp2)
- call void @llvm.lifetime.end.p0(i64 16384, ptr %tmp2)
- call void @llvm.lifetime.end.p0(i64 16384, ptr %x.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr %tmp2)
+ call void @llvm.lifetime.end.p0(ptr %x.sroa.0)
 ret void
 }

-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1

 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1

-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1

 declare void @bar(ptr noalias nocapture readonly dereferenceable(16384)) unnamed_addr #0
diff --git a/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll b/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll
index ff36bf0..e1a6c3f 100644
--- a/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll
+++ b/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll
@@ -78,7 +78,7 @@ define void @test5(ptr %ptr) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[EARLY_DATA:%.*]] = alloca [128 x i8], align 8
 ; CHECK-NEXT: [[TMP:%.*]] = alloca [[T:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[EARLY_DATA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[EARLY_DATA]])
 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[PTR:%.*]], align 8
 ; CHECK-NEXT: call fastcc void @decompose(ptr [[TMP]])
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[EARLY_DATA]], ptr [[TMP]], i64 32, i1 false)
@@ -87,7 +87,7 @@ define void @test5(ptr %ptr) {
 entry:
 %early_data = alloca [128 x i8], align 8
 %tmp = alloca %t, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %early_data)
+ call void @llvm.lifetime.start.p0(ptr %early_data)
 %0 = load i32, ptr %ptr, align 8
 call fastcc void @decompose(ptr %tmp)
 call void @llvm.memcpy.p0.p0.i64(ptr %early_data, ptr %tmp, i64 32, i1 false)
@@
-131,7 +131,7 @@ define void @test8(ptr noalias %src, ptr %dst) { declare void @clobber() ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0 +declare void @llvm.lifetime.start.p0(ptr nocapture) #0 ; Function Attrs: argmemonly nounwind willreturn declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #0 diff --git a/llvm/test/Transforms/MemCpyOpt/stack-move.ll b/llvm/test/Transforms/MemCpyOpt/stack-move.ll index 31e255b..940e30e 100644 --- a/llvm/test/Transforms/MemCpyOpt/stack-move.ll +++ b/llvm/test/Transforms/MemCpyOpt/stack-move.ll @@ -9,12 +9,12 @@ declare void @llvm.memcpy.p2.p1.i64(ptr addrspace(2) noalias nocapture writeonly declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) -declare void @llvm.lifetime.start.p1(i64, ptr addrspace(1) nocapture) -declare void @llvm.lifetime.end.p1(i64, ptr addrspace(1) nocapture) -declare void @llvm.lifetime.start.p2(i64, ptr addrspace(2) nocapture) -declare void @llvm.lifetime.end.p2(i64, ptr addrspace(2) nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) +declare void @llvm.lifetime.start.p1(ptr addrspace(1) nocapture) +declare void @llvm.lifetime.end.p1(ptr addrspace(1) nocapture) +declare void @llvm.lifetime.start.p2(ptr addrspace(2) nocapture) +declare void @llvm.lifetime.end.p2(ptr addrspace(2) nocapture) declare i32 @use_nocapture(ptr nocapture) declare i32 @use_maycapture(ptr noundef) @@ -31,8 +31,8 @@ define void @basic_memcpy() { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) @@ -40,8 +40,8 @@ define void @basic_memcpy() { %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -74,8 +74,8 @@ define void @basic_memmove() { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) @@ -83,8 +83,8 @@ define void @basic_memmove() { %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -99,8 +99,8 @@ define void @load_store() { ; %src = alloca i32, align 4 %dest = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 
4, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store i32 42, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) @@ -108,8 +108,8 @@ define void @load_store() { store i32 %src.val, ptr %dest %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -125,8 +125,8 @@ define void @load_store_scalable(<vscale x 4 x i32> %x) { ; %src = alloca <vscale x 4 x i32> %dest = alloca <vscale x 4 x i32> - call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store <vscale x 4 x i32> %x, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) @@ -135,8 +135,8 @@ define void @load_store_scalable(<vscale x 4 x i32> %x) { %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -151,16 +151,16 @@ define void @align_up() { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 8 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -177,21 +177,21 @@ define void @remove_extra_lifetime_intrinsics() { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %src) %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %src) %3 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr 
nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -230,8 +230,8 @@ define void @alias_no_mod() { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) %dest.alias = getelementptr %struct.Foo, ptr %dest, i32 0, i32 0 store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) @@ -240,8 +240,8 @@ define void @alias_no_mod() { %src.alias = getelementptr %struct.Foo, ptr %src, i32 0, i32 0 %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -265,16 +265,16 @@ define void @remove_scoped_noalias() { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src), !alias.scope !2 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %src) %2 = call i32 @use_nocapture(ptr nocapture %dest), !noalias !2 - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -289,16 +289,16 @@ define void @remove_alloca_metadata() { ; %src = alloca %struct.Foo, align 4, !annotation !3 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src), !alias.scope !2 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %2 = call i32 @use_nocapture(ptr nocapture %dest), !noalias !2 - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -314,16 +314,16 @@ define void @noalias_on_lifetime() { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src), !alias.scope !2 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) - call void 
@llvm.lifetime.end.p0(i64 12, ptr nocapture %src), !alias.scope !2 + call void @llvm.lifetime.end.p0(ptr nocapture %src), !alias.scope !2 %2 = call i32 @use_nocapture(ptr nocapture %dest), !noalias !2 - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest), !noalias !2 + call void @llvm.lifetime.end.p0(ptr nocapture %dest), !noalias !2 ret void } @@ -338,16 +338,16 @@ define void @src_ref_dest_ref_after_copy() { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %1 = call i32 @use_readonly(ptr nocapture %src) %2 = call i32 @use_readonly(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -362,16 +362,16 @@ define void @src_mod_dest_mod_after_copy() { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %1 = call i32 @use_writeonly(ptr nocapture %src) %2 = call i32 @use_writeonly(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -384,10 +384,10 @@ define void @avoid_memory_use_last_user_crash() { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %src) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %src) %v = load i32, ptr %dest ret void } @@ -409,14 +409,14 @@ define void @terminator_lastuse() personality i32 0 { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) - call void @llvm.lifetime.end.p0(i64 12, ptr %src) + call void @llvm.lifetime.end.p0(ptr %src) %rv = invoke i32 @use_nocapture(ptr %dest) to label %suc unwind label %unw unw: @@ -441,8 +441,8 @@ define void @multi_bb_memcpy(i1 %b) { ; %src = alloca i32, align 4 %dest = alloca i32, align 4 - call void 
@llvm.lifetime.start.p0(i64 4, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store i32 42, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) br label %bb0 @@ -453,8 +453,8 @@ bb0: bb1: %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -471,8 +471,8 @@ define void @multi_bb_load_store(i1 %b) { ; %src = alloca i32, align 4 %dest = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store i32 42, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) @@ -482,8 +482,8 @@ define void @multi_bb_load_store(i1 %b) { bb0: %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -494,8 +494,8 @@ define void @multi_bb_separated_load_store(i1 %b) { ; CHECK-SAME: (i1 [[B:%.*]]) { ; CHECK-NEXT: [[SRC:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store i32 42, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: [[SRC_VAL:%.*]] = load i32, ptr [[SRC]], align 4 @@ -505,14 +505,14 @@ define void @multi_bb_separated_load_store(i1 %b) { ; CHECK-NEXT: br label [[BB1:%.*]] ; CHECK: bb1: ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca i32, align 4 %dest = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store i32 42, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) @@ -525,8 +525,8 @@ bb0: bb1: %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -548,8 +548,8 @@ define void @multi_bb_simple_br(i1 %b) { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 
12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr noundef nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) @@ -564,8 +564,8 @@ bb1: br label %bb2 bb2: - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -598,7 +598,7 @@ bb1: br label %bb2 bb2: - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %1 = call i32 @use_nocapture(ptr noundef nocapture %dest) @@ -620,7 +620,7 @@ define void @multi_bb_dom_test1(i1 %b) { ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 40, i32 50, i32 60 }, ptr [[SRC]], align 4 ; CHECK-NEXT: br label [[BB2]] ; CHECK: bb2: -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]]) ; CHECK-NEXT: ret void @@ -641,7 +641,7 @@ bb1: br label %bb2 bb2: - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1 %1 = call i32 @use_nocapture(ptr noundef nocapture %dest) @@ -671,7 +671,7 @@ define void @multi_bb_pdom_test0(i1 %b) { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1 br i1 %b, label %bb0, label %bb1 @@ -686,7 +686,7 @@ bb1: bb2: %3 = call i32 @use_nocapture(ptr noundef nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void uselistorder ptr %dest, { 2, 3, 0, 1, 4, 5 } @@ -711,7 +711,7 @@ define void @multi_bb_pdom_test1(i1 %b) { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1 br i1 %b, label %bb0, label %bb1 @@ -747,7 +747,7 @@ define void @multi_bb_pdom_test2(i1 %b) { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1 %1 = call i32 @use_nocapture(ptr noundef nocapture %dest) @@ -784,8 +784,8 @@ entry: %nlt1 = icmp slt i32 %n, 1 %src = alloca 
%struct.Foo, align 8 %dest = alloca %struct.Foo, align 8 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 0, i32 1, i32 42 }, ptr %src br i1 %nlt1, label %loop_exit, label %loop_body @@ -816,8 +816,8 @@ define void @multi_bb_unreachable_modref(i1 %b0) { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr noundef nocapture %src) br i1 %b0, label %bb0, label %exit @@ -828,8 +828,8 @@ exit: bb0: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -850,8 +850,8 @@ define void @multi_bb_non_dominated(i1 %b0, i1 %b1) { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr noundef nocapture %src) br i1 %b0, label %bb0, label %bb1 @@ -865,8 +865,8 @@ bb1: br label %bb2 bb2: - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -878,30 +878,30 @@ define void @memcpy_is_def() { ; CHECK-LABEL: define void @memcpy_is_def() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[SRC]], ptr align 4 [[DEST]], i64 12, i1 false) ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) 
; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr noundef nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %2 = call i32 @use_nocapture(ptr noundef nocapture %dest) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %src, ptr align 4 %dest, i64 12, i1 false) %3 = call i32 @use_nocapture(ptr noundef nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -912,30 +912,30 @@ define void @memset_is_def() { ; CHECK-LABEL: define void @memset_is_def() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]]) ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[SRC]], i8 42, i64 12, i1 false) ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr noundef nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %2 = call i32 @use_nocapture(ptr noundef nocapture %dest) call void @llvm.memset.p0.i64(ptr align 4 %src, i8 42, i64 12, i1 false) %3 = call i32 @use_nocapture(ptr noundef nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -946,8 +946,8 @@ define void @store_is_def() { ; CHECK-LABEL: define void @store_is_def() { ; CHECK-NEXT: [[SRC:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca 
i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store i32 42, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]]) ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[SRC]], align 4 @@ -955,14 +955,14 @@ define void @store_is_def() { ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]]) ; CHECK-NEXT: store i32 64, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca i32, align 4 %dest = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store i32 42, ptr %src %1 = call i32 @use_nocapture(ptr noundef nocapture %src) %2 = load i32, ptr %src @@ -970,8 +970,8 @@ define void @store_is_def() { %3 = call i32 @use_nocapture(ptr noundef nocapture %dest) store i32 64, ptr %src %4 = call i32 @use_nocapture(ptr noundef nocapture %src) - call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -982,8 +982,8 @@ define void @multi_bb_dataflow(i1 %b) { ; CHECK-SAME: (i1 [[B:%.*]]) { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) @@ -995,14 +995,14 @@ define void @multi_bb_dataflow(i1 %b) { ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]]) ; CHECK-NEXT: br label [[BB2]] ; CHECK: bb2: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call 
void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr noundef nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) @@ -1017,8 +1017,8 @@ bb1: br label %bb2 bb2: - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1031,26 +1031,26 @@ define void @incomplete_memcpy() { ; CHECK-LABEL: define void @incomplete_memcpy() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 11, i1 false) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr noundef nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 11, i1 false) %2 = call i32 @use_nocapture(ptr noundef nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1060,28 +1060,28 @@ define void @incomplete_store() { ; CHECK-LABEL: define void @incomplete_store() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]]) ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[SRC]], align 4 ; CHECK-NEXT: store i32 [[TMP2]], ptr [[DEST]], align 4 ; CHECK-NEXT: 
[[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr noundef nocapture %src) %2 = load i32, ptr %src store i32 %2, ptr %dest %3 = call i32 @use_nocapture(ptr noundef nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1091,28 +1091,28 @@ define void @dynamically_sized_alloca(i64 %i) { ; CHECK-SAME: (i64 [[I:%.*]]) { ; CHECK-NEXT: [[SRC:%.*]] = alloca i8, i64 [[I]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i64 [[I]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO:%.*]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca i8, i64 %i, align 4 %dest = alloca i8, i64 %i, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1122,28 +1122,28 @@ define void @inalloca() { ; CHECK-LABEL: define void @inalloca() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca inalloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; 
CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca inalloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1153,28 +1153,28 @@ define void @dynamically_sized_memcpy(i64 %size) { ; CHECK-SAME: (i64 [[SIZE:%.*]]) { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 [[SIZE]], i1 false) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 %size, i1 false) %2 = call i32 @use_nocapture(ptr 
nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) ret void } @@ -1183,28 +1183,28 @@ define void @mismatched_alloca_size() { ; CHECK-LABEL: define void @mismatched_alloca_size() { ; CHECK-NEXT: [[SRC:%.*]] = alloca i8, i64 24, align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i64 12, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO:%.*]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca i8, i64 24, align 4 %dest = alloca i8, i64 12, align 4 - call void @llvm.lifetime.start.p0(i64 24, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 24, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1213,28 +1213,28 @@ define void @mismatched_alloca_addrspace() { ; CHECK-LABEL: define void @mismatched_alloca_addrspace() { ; CHECK-NEXT: [[SRC:%.*]] = alloca i8, i64 24, align 4, addrspace(1) ; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i64 12, align 4, addrspace(2) -; CHECK-NEXT: call void @llvm.lifetime.start.p1(i64 24, ptr addrspace(1) captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p2(i64 12, ptr addrspace(2) captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p1(ptr addrspace(1) captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p2(ptr addrspace(2) captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO:%.*]] { i32 10, i32 20, i32 30 }, ptr addrspace(1) [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr addrspace(1) captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p2.p1.i64(ptr addrspace(2) align 4 [[DEST]], ptr addrspace(1) align 4 [[SRC]], i64 12, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.end.p1(i64 24, ptr addrspace(1) captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p1(ptr addrspace(1) captures(none) [[SRC]]) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr addrspace(2) 
captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p2(i64 12, ptr addrspace(2) captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p2(ptr addrspace(2) captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca i8, i64 24, align 4, addrspace(1) %dest = alloca i8, i64 12, align 4, addrspace(2) - call void @llvm.lifetime.start.p1(i64 24, ptr addrspace(1) nocapture %src) - call void @llvm.lifetime.start.p2(i64 12, ptr addrspace(2) nocapture %dest) + call void @llvm.lifetime.start.p1(ptr addrspace(1) nocapture %src) + call void @llvm.lifetime.start.p2(ptr addrspace(2) nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr addrspace(1) %src %1 = call i32 @use_nocapture(ptr addrspace(1) nocapture %src) call void @llvm.memcpy.p2.p1.i64(ptr addrspace(2) align 4 %dest, ptr addrspace(1) align 4 %src, i64 12, i1 false) - call void @llvm.lifetime.end.p1(i64 24, ptr addrspace(1) nocapture %src) + call void @llvm.lifetime.end.p1(ptr addrspace(1) nocapture %src) %2 = call i32 @use_nocapture(ptr addrspace(2) nocapture %dest) - call void @llvm.lifetime.end.p2(i64 12, ptr addrspace(2) nocapture %dest) + call void @llvm.lifetime.end.p2(ptr addrspace(2) nocapture %dest) ret void } @@ -1243,28 +1243,28 @@ define void @volatile_memcpy() { ; CHECK-LABEL: define void @volatile_memcpy() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 true) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 true) %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1273,28 +1273,28 @@ define void @dest_captured() { ; CHECK-LABEL: define void @dest_captured() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void 
@llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_maycapture(ptr [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %2 = call i32 @use_maycapture(ptr %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1303,28 +1303,28 @@ define void @src_captured() { ; CHECK-LABEL: define void @src_captured() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_maycapture(ptr [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_maycapture(ptr %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %2 = call i32 @use_nocapture(ptr nocapture %dest) 
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1334,30 +1334,30 @@ define void @mod_ref_before_copy() { ; CHECK-LABEL: define void @mod_ref_before_copy() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[R:%.*]] = call i32 @use_readonly(ptr captures(none) [[DEST]]) ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %r = call i32 @use_readonly(ptr nocapture %dest) %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %src) %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1366,30 +1366,30 @@ define void @mod_dest_before_copy() { ; CHECK-LABEL: define void @mod_dest_before_copy() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: store i32 13, ptr [[DEST]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) 
[[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src store i32 13, ptr %dest %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %src) %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1397,22 +1397,22 @@ define void @mod_src_before_store_after_load() { ; CHECK-LABEL: define void @mod_src_before_store_after_load() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: store i32 13, ptr [[DEST]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 13, i32 13, i32 13 }, ptr [[SRC]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src store i32 13, ptr %dest %1 = call i32 @use_nocapture(ptr nocapture %src) @@ -1421,9 +1421,9 @@ define void @mod_src_before_store_after_load() { store %struct.Foo { i32 13, i32 13, i32 13 }, ptr %src store %struct.Foo %src.val, ptr %dest - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %src) %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1433,28 +1433,28 @@ define void @src_mod_dest_ref_after_copy() { ; CHECK-LABEL: define void @src_mod_dest_ref_after_copy() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: 
[[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 13, i32 13, i32 13 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) store %struct.Foo { i32 13, i32 13, i32 13 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1464,28 +1464,28 @@ define void @src_ref_dest_mod_after_copy() { ; CHECK-LABEL: define void @src_ref_dest_mod_after_copy() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 13, i32 13, i32 13 }, ptr [[DEST]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src call void 
@llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) store %struct.Foo { i32 13, i32 13, i32 13 }, ptr %dest %1 = call i32 @use_nocapture(ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1494,22 +1494,22 @@ define void @dest_alias_mod_before_copy() { ; CHECK-LABEL: define void @dest_alias_mod_before_copy() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[DEST_ALIAS:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr [[DEST]], i64 0, i32 1 ; CHECK-NEXT: store i32 13, ptr [[DEST_ALIAS]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %dest.alias = getelementptr inbounds %struct.Foo, ptr %dest, i64 0, i32 1 store i32 13, ptr %dest.alias @@ -1518,8 +1518,8 @@ define void @dest_alias_mod_before_copy() { call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1528,22 +1528,22 @@ define void @alias_src_ref_dest_mod_after_copy() { ; CHECK-LABEL: define void @alias_src_ref_dest_mod_after_copy() { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) ; CHECK-NEXT: call 
void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) ; CHECK-NEXT: [[DEST_ALIAS:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr [[DEST]], i64 0, i32 1 ; CHECK-NEXT: store i32 13, ptr [[DEST_ALIAS]], align 4 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) @@ -1552,8 +1552,8 @@ define void @alias_src_ref_dest_mod_after_copy() { %dest.alias = getelementptr inbounds %struct.Foo, ptr %dest, i64 0, i32 1 store i32 13, ptr %dest.alias %2 = call i32 @use_nocapture(ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1564,8 +1564,8 @@ define void @multi_bb_dataflow_conflict(i1 %b) { ; CHECK-SAME: (i1 [[B:%.*]]) { ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false) @@ -1578,14 +1578,14 @@ define void @multi_bb_dataflow_conflict(i1 %b) { ; CHECK-NEXT: br label [[BB2]] ; CHECK: bb2: ; CHECK-NEXT: [[TMP4:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: ret void ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr noundef nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) @@ -1601,8 +1601,8 @@ bb1: bb2: %4 = call i32 @use_nocapture(ptr noundef nocapture %dest) - call 
void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } @@ -1614,8 +1614,8 @@ define void @multi_bb_loop_dest_mod_before_copy(i32 %n) { ; CHECK-NEXT: [[NLT1:%.*]] = icmp slt i32 [[N]], 1 ; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 8 ; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]]) ; CHECK-NEXT: store [[STRUCT_FOO]] { i32 0, i32 1, i32 42 }, ptr [[SRC]], align 4 ; CHECK-NEXT: br i1 [[NLT1]], label [[LOOP_EXIT:%.*]], label [[LOOP_BODY:%.*]] ; CHECK: loop_body: @@ -1632,8 +1632,8 @@ entry: %nlt1 = icmp slt i32 %n, 1 %src = alloca %struct.Foo, align 8 %dest = alloca %struct.Foo, align 8 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 0, i32 1, i32 42 }, ptr %src br i1 %nlt1, label %loop_exit, label %loop_body @@ -1660,17 +1660,17 @@ define void @partial_lifetime() { ; %src = alloca %struct.Foo, align 4 %dest = alloca %struct.Foo, align 4 - call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.start.p0(i64 3, ptr nocapture %dest) + call void @llvm.lifetime.start.p0(ptr nocapture %src) + call void @llvm.lifetime.start.p0(ptr nocapture %dest) store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src %1 = call i32 @use_nocapture(ptr nocapture %src) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false) - call void @llvm.lifetime.end.p0(i64 3, ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %src) %2 = call i32 @use_nocapture(ptr nocapture %dest) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src) - call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest) + call void @llvm.lifetime.end.p0(ptr nocapture %src) + call void @llvm.lifetime.end.p0(ptr nocapture %dest) ret void } diff --git a/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll b/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll index 179d6e6..e2f5007 100644 --- a/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll +++ b/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll @@ -35,10 +35,10 @@ if.end5: ; preds = %if.then, %entry ret i1 %rez.0 } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } 
attributes #1 = { argmemonly nounwind } diff --git a/llvm/test/Transforms/MoveAutoInit/clobber.ll b/llvm/test/Transforms/MoveAutoInit/clobber.ll index 08ffb13..f52034d 100644 --- a/llvm/test/Transforms/MoveAutoInit/clobber.ll +++ b/llvm/test/Transforms/MoveAutoInit/clobber.ll @@ -10,9 +10,9 @@ define i32 @foo(i32 noundef %0, i32 noundef %1, i32 noundef %2) #0 { ; CHECK-NEXT: [[TMP4:%.*]] = alloca [100 x i8], align 16 ; CHECK-NEXT: [[TMP5:%.*]] = alloca [2 x i8], align 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [100 x i8], ptr [[TMP4]], i64 0, i64 0 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 100, ptr nonnull [[TMP4]]) #[[ATTR3:[0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]]) #[[ATTR3:[0-9]+]] ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8], ptr [[TMP5]], i64 0, i64 0 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr nonnull [[TMP5]]) #[[ATTR3]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP5]]) #[[ATTR3]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8], ptr [[TMP5]], i64 0, i64 1 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP1:%.*]], 0 ; CHECK-NEXT: br i1 [[TMP9]], label [[TMP15:%.*]], label [[TMP10:%.*]] @@ -38,19 +38,19 @@ define i32 @foo(i32 noundef %0, i32 noundef %1, i32 noundef %2) #0 { ; CHECK-NEXT: br label [[TMP22]] ; CHECK: 22: ; CHECK-NEXT: [[TMP23:%.*]] = phi i32 [ [[TMP14]], [[TMP10]] ], [ [[TMP21]], [[TMP17]] ], [ 0, [[TMP15]] ] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[TMP5]]) #[[ATTR3]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 100, ptr nonnull [[TMP4]]) #[[ATTR3]] +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP5]]) #[[ATTR3]] +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]]) #[[ATTR3]] ; CHECK-NEXT: ret i32 [[TMP23]] ; %4 = alloca [100 x i8], align 16 %5 = alloca [2 x i8], align 1 %6 = getelementptr inbounds [100 x i8], ptr %4, i64 0, i64 0 - call void @llvm.lifetime.start.p0(i64 100, ptr nonnull %4) #3 + call void @llvm.lifetime.start.p0(ptr nonnull %4) #3 ; This memset must move. call void @llvm.memset.p0.i64(ptr noundef nonnull align 16 dereferenceable(100) %6, i8 -86, i64 100, i1 false), !annotation !0 %7 = getelementptr inbounds [2 x i8], ptr %5, i64 0, i64 0 - call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %5) #3 + call void @llvm.lifetime.start.p0(ptr nonnull %5) #3 ; This store must move. 
store i8 -86, ptr %7, align 1, !annotation !0 %8 = getelementptr inbounds [2 x i8], ptr %5, i64 0, i64 1 @@ -81,16 +81,16 @@ define i32 @foo(i32 noundef %0, i32 noundef %1, i32 noundef %2) #0 { 22: %23 = phi i32 [ %14, %10 ], [ %21, %17 ], [ 0, %15 ] - call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %5) #3 - call void @llvm.lifetime.end.p0(i64 100, ptr nonnull %4) #3 + call void @llvm.lifetime.end.p0(ptr nonnull %5) #3 + call void @llvm.lifetime.end.p0(ptr nonnull %4) #3 ret i32 %23 } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { mustprogress nofree nosync nounwind readnone uwtable willreturn } attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn } diff --git a/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll b/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll index dfd6d7d..979aa69 100644 --- a/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll +++ b/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll @@ -11,7 +11,7 @@ define void @foo(ptr %arg) { ; CHECK-SAME: ptr [[ARG:%.*]]) { ; CHECK-NEXT: [[BB:.*:]] ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i8, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[ALLOCA]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]]) ; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[ARG]], align 8 ; CHECK-NEXT: [[LOAD1:%.*]] = load ptr, ptr [[LOAD]], align 8 ; CHECK-NEXT: [[CALL:%.*]] = call ptr [[LOAD1]](ptr [[ALLOCA]]) @@ -19,14 +19,14 @@ define void @foo(ptr %arg) { ; bb: %alloca = alloca i8, align 16 - call void @llvm.lifetime.start.p0(i64 1, ptr %alloca) + call void @llvm.lifetime.start.p0(ptr %alloca) %load = load ptr, ptr %arg, align 8 %load1 = load ptr, ptr %load, align 8 %call = call ptr %load1(ptr %alloca) ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) #0 +declare void @llvm.lifetime.start.p0(ptr captures(none)) #0 declare ptr @malloc(i64) diff --git a/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll b/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll index 017f608..8b2d662 100644 --- a/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll +++ b/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll @@ -18,7 +18,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 { entry: %sv = alloca %"class.llvm::SmallVector", align 16 - call void @llvm.lifetime.start.p0(i64 64, ptr %sv) #1 + call void @llvm.lifetime.start.p0(ptr %sv) #1 %FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3 store ptr %FirstEl.i.i.i.i.i.i, ptr %sv, align 16, !tbaa !4 %EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1 @@ -87,7 +87,7 @@ if.then.i.i.i20: ; preds = %invoke.cont3 br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21 _ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20 - call void @llvm.lifetime.end.p0(i64 64, ptr %sv) #1 + call void @llvm.lifetime.end.p0(ptr %sv) #1 ret void lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2 @@ -106,14 +106,14 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa } ; Function Attrs: nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr 
nocapture) #1 declare i32 @__gxx_personality_v0(...) declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr) #2 ; Function Attrs: nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64) #2 diff --git a/llvm/test/Transforms/NewGVN/lifetime-simple.ll b/llvm/test/Transforms/NewGVN/lifetime-simple.ll index 0a7bd33..7fe6649 100644 --- a/llvm/test/Transforms/NewGVN/lifetime-simple.ll +++ b/llvm/test/Transforms/NewGVN/lifetime-simple.ll @@ -9,21 +9,21 @@ define i8 @test() nounwind { ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[P:%.*]] = alloca [32 x i8], align 1 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[P]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]]) ; CHECK-NEXT: store i8 1, ptr [[P]], align 1 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[P]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]]) ; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[P]], align 1 ; CHECK-NEXT: ret i8 [[TMP0]] ; entry: %P = alloca [32 x i8] - call void @llvm.lifetime.start.p0(i64 32, ptr %P) + call void @llvm.lifetime.start.p0(ptr %P) %0 = load i8, ptr %P store i8 1, ptr %P - call void @llvm.lifetime.end.p0(i64 32, ptr %P) + call void @llvm.lifetime.end.p0(ptr %P) %1 = load i8, ptr %P ret i8 %1 } -declare void @llvm.lifetime.start.p0(i64 %S, ptr nocapture %P) readonly -declare void @llvm.lifetime.end.p0(i64 %S, ptr nocapture %P) +declare void @llvm.lifetime.start.p0(ptr nocapture %P) readonly +declare void @llvm.lifetime.end.p0(ptr nocapture %P) diff --git a/llvm/test/Transforms/NewGVN/verify-memoryphi.ll b/llvm/test/Transforms/NewGVN/verify-memoryphi.ll index a19a2a6..15bb1cb 100644 --- a/llvm/test/Transforms/NewGVN/verify-memoryphi.ll +++ b/llvm/test/Transforms/NewGVN/verify-memoryphi.ll @@ -5,7 +5,7 @@ ; REQUIRES: asserts -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) define void @tinkywinky() { ; CHECK-LABEL: define void @tinkywinky() { @@ -20,11 +20,11 @@ define void @tinkywinky() { ; entry: %a = alloca i8 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) br i1 false, label %body, label %end body: - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) br label %end end: diff --git a/llvm/test/Transforms/NewGVN/vscale.ll b/llvm/test/Transforms/NewGVN/vscale.ll index 7021172..64e22e1 100644 --- a/llvm/test/Transforms/NewGVN/vscale.ll +++ b/llvm/test/Transforms/NewGVN/vscale.ll @@ -579,7 +579,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1 ; CHECK-LABEL: @bigexample( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[REF_TMP:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull [[REF_TMP]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[REF_TMP]]) ; CHECK-NEXT: [[A_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[A:%.*]], 0 ; CHECK-NEXT: store <vscale x 4 x i32> [[A_ELT]], ptr [[REF_TMP]], align 16 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() @@ -603,12 +603,12 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1 ; CHECK-NEXT: [[TMP12:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale 
x 16 x i8>, <vscale x 16 x i8> } [[TMP9]], <vscale x 16 x i8> [[DOTUNPACK10]], 2 ; CHECK-NEXT: [[DOTUNPACK12:%.*]] = load <vscale x 16 x i8>, ptr [[REF_TMP_REPACK5]], align 16 ; CHECK-NEXT: [[TMP15:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP12]], <vscale x 16 x i8> [[DOTUNPACK12]], 3 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull [[REF_TMP]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[REF_TMP]]) ; CHECK-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP15]] ; entry: %ref.tmp = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16 - call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %ref.tmp) + call void @llvm.lifetime.start.p0(ptr nonnull %ref.tmp) %a.elt = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %a, 0 store <vscale x 4 x i32> %a.elt, ptr %ref.tmp, align 16 %0 = call i64 @llvm.vscale.i64() @@ -643,7 +643,7 @@ entry: %.elt11 = getelementptr inbounds i8, ptr %ref.tmp, i64 %14 %.unpack12 = load <vscale x 16 x i8>, ptr %.elt11, align 16 %15 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %12, <vscale x 16 x i8> %.unpack12, 3 - call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %ref.tmp) + call void @llvm.lifetime.end.p0(ptr nonnull %ref.tmp) ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %15 } diff --git a/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll b/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll index 180fd0a..694deb3 100644 --- a/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll +++ b/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll @@ -7,8 +7,8 @@ declare ptr @llvm.objc.autoreleaseReturnValue(ptr) declare ptr @llvm.objc.retainAutoreleasedReturnValue(ptr) declare ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr) declare void @opaque() -declare void @llvm.lifetime.start(i64, ptr nocapture) -declare void @llvm.lifetime.end(i64, ptr nocapture) +declare void @llvm.lifetime.start(ptr nocapture) +declare void @llvm.lifetime.end(ptr nocapture) ; CHECK-LABEL: define ptr @elide_with_retainRV( ; CHECK-NEXT: entry: @@ -81,16 +81,16 @@ entry: ; CHECK-LABEL: define ptr @elide_with_retainRV_splitByLifetime( ; CHECK-NEXT: entry: ; CHECK-NEXT: %x = alloca ptr -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %x) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %x) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %x) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: ret ptr %x define ptr @elide_with_retainRV_splitByLifetime() nounwind { entry: ; Cleanup should skip over lifetime intrinsics. 
%x = alloca ptr - call void @llvm.lifetime.start(i64 8, ptr %x) + call void @llvm.lifetime.start(ptr %x) %b = call ptr @llvm.objc.autoreleaseReturnValue(ptr %x) nounwind - call void @llvm.lifetime.end(i64 8, ptr %x) + call void @llvm.lifetime.end(ptr %x) %d = call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr %b) nounwind ret ptr %d } @@ -221,17 +221,17 @@ entry: ; CHECK-LABEL: define ptr @elide_with_claimRV_splitByLifetime( ; CHECK-NEXT: entry: ; CHECK-NEXT: %x = alloca ptr -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %x) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %x) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %x) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %x) ; CHECK-NEXT: tail call void @llvm.objc.release(ptr %x) ; CHECK-NEXT: ret ptr %x define ptr @elide_with_claimRV_splitByLifetime() nounwind { entry: ; Cleanup should skip over lifetime intrinsics. %x = alloca ptr - call void @llvm.lifetime.start(i64 8, ptr %x) + call void @llvm.lifetime.start(ptr %x) %b = call ptr @llvm.objc.autoreleaseReturnValue(ptr %x) nounwind - call void @llvm.lifetime.end(i64 8, ptr %x) + call void @llvm.lifetime.end(ptr %x) %d = call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr %b) nounwind ret ptr %d } diff --git a/llvm/test/Transforms/ObjCARC/post-inlining.ll b/llvm/test/Transforms/ObjCARC/post-inlining.ll index c15e089..b184bea 100644 --- a/llvm/test/Transforms/ObjCARC/post-inlining.ll +++ b/llvm/test/Transforms/ObjCARC/post-inlining.ll @@ -65,22 +65,22 @@ entry: ; 2) Lifetime markers. -declare void @llvm.lifetime.start.p0(i64, ptr) -declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.start.p0(ptr) +declare void @llvm.lifetime.end.p0(ptr) ; CHECK-LABEL: define ptr @testLifetime( ; CHECK: entry: ; CHECK-NEXT: %obj = alloca i8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %obj) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %obj) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %obj) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %obj) ; CHECK-NEXT: ret ptr %call.i ; CHECK-NEXT: } define ptr @testLifetime(ptr %call.i) { entry: %obj = alloca i8 - call void @llvm.lifetime.start.p0(i64 8, ptr %obj) + call void @llvm.lifetime.start.p0(ptr %obj) %0 = tail call ptr @llvm.objc.autoreleaseReturnValue(ptr %call.i) nounwind - call void @llvm.lifetime.end.p0(i64 8, ptr %obj) + call void @llvm.lifetime.end.p0(ptr %obj) %1 = tail call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr %call.i) nounwind ret ptr %call.i } diff --git a/llvm/test/Transforms/ObjCARC/related-check.ll b/llvm/test/Transforms/ObjCARC/related-check.ll index 7c56b2d..045c001 100644 --- a/llvm/test/Transforms/ObjCARC/related-check.ll +++ b/llvm/test/Transforms/ObjCARC/related-check.ll @@ -52,9 +52,9 @@ for.cond.cleanup: ; preds = %for.cond.cleanup.lo for.body: ; preds = %for.body.lr.ph, %if.end19 %i.032 = phi i32 [ 1, %for.body.lr.ph ], [ %inc, %if.end19 ] - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %persistent) #4 + call void @llvm.lifetime.start.p0(ptr nonnull %persistent) #4 store i32 0, ptr %persistent, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %personalized) #4 + call void @llvm.lifetime.start.p0(ptr nonnull %personalized) #4 store i32 0, ptr %personalized, align 4 %call = call zeroext i1 @lookupType(ptr noundef nonnull %persistent, ptr noundef nonnull %personalized) #8, !clang.arc.no_objc_arc_exceptions !15 br i1 %call, label %if.then, label %if.end19 @@ -110,18 +110,18 @@ if.end18: ; preds = %if.else, 
%if.then13 br label %if.end19 if.end19: ; preds = %if.end18, %for.body - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %personalized) #4 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %persistent) #4 + call void @llvm.lifetime.end.p0(ptr nonnull %personalized) #4 + call void @llvm.lifetime.end.p0(ptr nonnull %persistent) #4 %inc = add nuw nsw i32 %i.032, 1 %exitcond.not = icmp eq i32 %inc, %argc br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body } ; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 ; Function Attrs: inaccessiblememonly mustprogress nocallback nofree nosync nounwind willreturn declare void @llvm.objc.clang.arc.noop.use(...) #5 diff --git a/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll b/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll index ad41639..60969ec 100644 --- a/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll +++ b/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll @@ -72,10 +72,10 @@ common.ret: ; preds = %entry, %user_code.e user_code.entry: ; preds = %entry %1 = call i32 @__kmpc_global_thread_num(ptr nonnull @3) #3 call void @unknown() #6, !dbg !20 - call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3 + call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i.i) #3 %2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3 call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i.i, i64 noundef 0) #3, !dbg !23 - call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26 + call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26 call void @unknown() #6, !dbg !27 call void @__kmpc_target_deinit() #3, !dbg !28 br label %common.ret @@ -116,18 +116,18 @@ common.ret: ; preds = %entry, %user_code.e user_code.entry: ; preds = %entry %1 = call i32 @__kmpc_global_thread_num(ptr nonnull @9) #3 - call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3 + call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3 %2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3 call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !35 - call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39 - call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3 + call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39 + call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3 %3 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3 call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %3, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef 
nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !40 - call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42 - call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3 + call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42 + call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3 %4 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3 call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %4, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !43 - call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45 + call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45 call void @no_openmp() call void @no_parallelism() call void @__kmpc_target_deinit() #3, !dbg !46 @@ -155,10 +155,10 @@ declare void @__kmpc_get_shared_variables(ptr) local_unnamed_addr declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) local_unnamed_addr ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5 +declare void @llvm.lifetime.start.p0(ptr nocapture) #5 ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5 +declare void @llvm.lifetime.end.p0(ptr nocapture) #5 declare void @no_openmp() #7 declare void @no_parallelism() #8 diff --git a/llvm/test/Transforms/OpenMP/nested_parallelism.ll b/llvm/test/Transforms/OpenMP/nested_parallelism.ll index 412e5ea..5d96465 100644 --- a/llvm/test/Transforms/OpenMP/nested_parallelism.ll +++ b/llvm/test/Transforms/OpenMP/nested_parallelism.ll @@ -52,7 +52,7 @@ define weak_odr protected ptx_kernel void @__omp_offloading_10302_bd7e0_main_l13 ; CHECK: common.ret: ; CHECK-NEXT: ret void ; CHECK: user_code.entry: -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) ; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2:[0-9]+]] ; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @__kmpc_get_hardware_thread_id_in_block() #[[ATTR2]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP2]], 0 @@ -66,7 +66,7 @@ define weak_odr protected ptx_kernel void @__omp_offloading_10302_bd7e0_main_l13 ; CHECK-NEXT: [[TMP4:%.*]] = addrspacecast ptr [[CAPTURED_VARS_ADDRS_I]] to ptr addrspace(5) ; CHECK-NEXT: store ptr addrspacecast (ptr addrspace(3) @i_shared to ptr), ptr addrspace(5) [[TMP4]], align 8 ; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__, ptr nonnull @__omp_outlined___wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I]], i64 1) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) ; CHECK-NEXT: call void @__kmpc_target_deinit() ; CHECK-NEXT: br label [[COMMON_RET]] ; @@ -80,7 +80,7 @@ common.ret: ; preds = %entry, %_Z3fooi.int ret void user_code.entry: ; preds = %entry - call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i) + call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i) %1 = tail call 
i32 @__kmpc_global_thread_num(ptr nonnull @1) #6 %2 = tail call i32 @__kmpc_get_hardware_thread_id_in_block() #6 %3 = icmp eq i32 %2, 0 @@ -95,7 +95,7 @@ _Z3fooi.internalized.exit: ; preds = %user_code.entry, %r tail call void @__kmpc_barrier_simple_spmd(ptr nonnull @1, i32 %2) store ptr addrspacecast (ptr addrspace(3) @i_shared to ptr), ptr %captured_vars_addrs.i, align 8 call void @__kmpc_parallel_51(ptr nonnull @1, i32 %1, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__, ptr nonnull @__omp_outlined___wrapper, ptr nonnull %captured_vars_addrs.i, i64 1) #6 - call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i) + call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i) call void @__kmpc_target_deinit() #6 br label %common.ret } @@ -139,13 +139,13 @@ define weak_odr protected ptx_kernel void @__omp_offloading_10302_bd7e0_main_l16 ; CHECK-NEXT: ret void ; CHECK: user_code.entry: ; CHECK-NEXT: [[I_ADDR_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[I:%.*]] to i32 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) ; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2]] ; CHECK-NEXT: store i32 [[I_ADDR_SROA_0_0_EXTRACT_TRUNC]], ptr addrspace(3) @i.i_shared, align 16 ; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr [[CAPTURED_VARS_ADDRS_I]] to ptr addrspace(5) ; CHECK-NEXT: store ptr addrspacecast (ptr addrspace(3) @i.i_shared to ptr), ptr addrspace(5) [[TMP2]], align 8 ; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I]], i64 1) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) ; CHECK-NEXT: call void @__kmpc_target_deinit() ; CHECK-NEXT: br label [[COMMON_RET]] ; @@ -160,12 +160,12 @@ common.ret: ; preds = %entry, %user_code.e user_code.entry: ; preds = %entry %i.addr.sroa.0.0.extract.trunc = trunc i64 %i to i32 - call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i) + call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i) %1 = tail call i32 @__kmpc_global_thread_num(ptr nonnull @1) #6 store i32 %i.addr.sroa.0.0.extract.trunc, ptr addrspacecast (ptr addrspace(3) @i.i_shared to ptr), align 16 store ptr addrspacecast (ptr addrspace(3) @i.i_shared to ptr), ptr %captured_vars_addrs.i, align 8 call void @__kmpc_parallel_51(ptr nonnull @1, i32 %1, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull %captured_vars_addrs.i, i64 1) #6 - call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i) + call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i) call void @__kmpc_target_deinit() #6 br label %common.ret } @@ -201,7 +201,7 @@ define internal void @__omp_outlined__(ptr noalias nocapture readnone %.global_t ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CAPTURED_VARS_ADDRS_I:%.*]] = alloca [1 x ptr], align 8 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I:%.*]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) ; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 
@__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2]] ; CHECK-NEXT: [[I_I:%.*]] = tail call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #[[ATTR2]] ; CHECK-NEXT: store i32 [[TMP0]], ptr [[I_I]], align 16 @@ -209,20 +209,20 @@ define internal void @__omp_outlined__(ptr noalias nocapture readnone %.global_t ; CHECK-NEXT: store ptr [[I_I]], ptr addrspace(5) [[TMP2]], align 8 ; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I]], i64 1) ; CHECK-NEXT: call void @__kmpc_free_shared(ptr [[I_I]], i64 4) #[[ATTR2]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]]) ; CHECK-NEXT: ret void ; entry: %captured_vars_addrs.i = alloca [1 x ptr], align 8 %0 = load i32, ptr %i, align 4 - call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i) + call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i) %1 = tail call i32 @__kmpc_global_thread_num(ptr nonnull @1) #6 %i.i = tail call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #6 store i32 %0, ptr %i.i, align 16 store ptr %i.i, ptr %captured_vars_addrs.i, align 8 call void @__kmpc_parallel_51(ptr nonnull @1, i32 %1, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull %captured_vars_addrs.i, i64 1) #6 call void @__kmpc_free_shared(ptr %i.i, i64 4) #6 - call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i) + call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i) ret void } @@ -236,7 +236,7 @@ define internal void @__omp_outlined___wrapper(i16 zeroext %0, i32 %1) #5 { ; CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr addrspace(5) [[TMP5]], align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]]) ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2]] ; CHECK-NEXT: [[I_I_I:%.*]] = call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #[[ATTR2]] ; CHECK-NEXT: store i32 [[TMP4]], ptr [[I_I_I]], align 16 @@ -244,7 +244,7 @@ define internal void @__omp_outlined___wrapper(i16 zeroext %0, i32 %1) #5 { ; CHECK-NEXT: store ptr [[I_I_I]], ptr addrspace(5) [[TMP7]], align 8 ; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP6]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]], i64 1) ; CHECK-NEXT: call void @__kmpc_free_shared(ptr [[I_I_I]], i64 4) #[[ATTR2]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]]) ; CHECK-NEXT: ret void ; entry: @@ -254,14 +254,14 @@ entry: %2 = load ptr, ptr %global_args, align 8 %3 = load ptr, ptr %2, align 8 %4 = load i32, ptr %3, align 4 - call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i.i) + call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i.i) %5 = call i32 @__kmpc_global_thread_num(ptr 
nonnull @1) #6 %i.i.i = call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #6 store i32 %4, ptr %i.i.i, align 16 store ptr %i.i.i, ptr %captured_vars_addrs.i.i, align 8 call void @__kmpc_parallel_51(ptr nonnull @1, i32 %5, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull %captured_vars_addrs.i.i, i64 1) #6 call void @__kmpc_free_shared(ptr %i.i.i, i64 4) #6 - call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i.i) + call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i.i) ret void } @@ -316,9 +316,9 @@ declare i32 @__kmpc_get_hardware_thread_id_in_block() local_unnamed_addr declare void @__kmpc_barrier_simple_spmd(ptr, i32) local_unnamed_addr #10 -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #11 +declare void @llvm.lifetime.start.p0(ptr nocapture) #11 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #11 +declare void @llvm.lifetime.end.p0(ptr nocapture) #11 !omp_offload.info = !{!0, !1} diff --git a/llvm/test/Transforms/OpenMP/parallel_deletion.ll b/llvm/test/Transforms/OpenMP/parallel_deletion.ll index 3e16d96..67970c4 100644 --- a/llvm/test/Transforms/OpenMP/parallel_deletion.ll +++ b/llvm/test/Transforms/OpenMP/parallel_deletion.ll @@ -282,46 +282,46 @@ define void @delete_parallel_2() { ; CHECK-LABEL: define {{[^@]+}}@delete_parallel_2() { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR18:[0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR18:[0-9]+]] ; CHECK-NEXT: store i32 0, ptr [[A]], align 4 ; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..3, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[A]]) ; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[A]]) ; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..5, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[A]]) ; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..6, ptr noundef nonnull align 4 captures(none) dereferenceable(4) [[A]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A]]) ; CHECK-NEXT: ret void ; ; CHECK1-LABEL: define {{[^@]+}}@delete_parallel_2() { ; CHECK1-NEXT: entry: ; CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 -; CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]] +; CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]] ; CHECK1-NEXT: store i32 0, ptr [[A]], align 4 ; CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..3, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]]) ; CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..4, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]]) ; CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..5, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]]) ; CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..6, ptr nocapture noundef nonnull align 4 dereferenceable(4) [[A]]) -; CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A]]) +; CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A]]) ; CHECK1-NEXT: ret void ; CHECK2-LABEL: define {{[^@]+}}@delete_parallel_2() { ; CHECK2-NEXT: entry: ; CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4 -; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]] +; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]] ; CHECK2-NEXT: store i32 0, ptr [[A]], align 4 ; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..3, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]]) ; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..4, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]]) ; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..5, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]]) ; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..6, ptr nocapture noundef nonnull align 4 dereferenceable(4) [[A]]) -; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A]]) +; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A]]) ; CHECK2-NEXT: ret void entry: %a = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a) + call void @llvm.lifetime.start.p0(ptr nonnull %a) store i32 0, ptr %a, align 4 call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..3, ptr nonnull %a) call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..4, ptr nonnull %a) call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..5, ptr nonnull %a) call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..6, ptr nonnull %a) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %a) + call void @llvm.lifetime.end.p0(ptr nonnull %a) ret void } @@ -445,7 +445,7 @@ omp_if.end: ; preds = %entry, %omp_if.then ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare i32 @omp_get_thread_num() inaccessiblememonly nofree nosync nounwind readonly @@ -531,7 +531,7 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %. ; CHECK-NEXT: entry: ; CHECK-NEXT: [[A1:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 [[A1]]) #[[ATTR20:[0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 [[A1]]) #[[ATTR20:[0-9]+]] ; CHECK-NEXT: store i32 1, ptr [[A1]], align 4 ; CHECK-NEXT: store ptr [[A1]], ptr [[DOTOMP_REDUCTION_RED_LIST]], align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4 @@ -552,7 +552,7 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %. ; CHECK-NEXT: [[TMP8:%.*]] = atomicrmw add ptr [[A]], i32 [[TMP7]] monotonic, align 4 ; CHECK-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] ; CHECK: .omp.reduction.default: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A1]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A1]]) ; CHECK-NEXT: ret void ; ; CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6 @@ -560,7 +560,7 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %. ; CHECK1-NEXT: entry: ; CHECK1-NEXT: [[A1:%.*]] = alloca i32, align 4 ; CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8 -; CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]] +; CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]] ; CHECK1-NEXT: store i32 1, ptr [[A1]], align 4 ; CHECK1-NEXT: store ptr [[A1]], ptr [[DOTOMP_REDUCTION_RED_LIST]], align 8 ; CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4 @@ -581,14 +581,14 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %. 
 ; CHECK1-NEXT: [[TMP8:%.*]] = atomicrmw add ptr [[A]], i32 [[TMP7]] monotonic, align 4
 ; CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
 ; CHECK1: .omp.reduction.default:
-; CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A1]])
+; CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A1]])
 ; CHECK1-NEXT: ret void
 ; CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..6
 ; CHECK2-SAME: (ptr noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], ptr noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]], ptr nocapture noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) {
 ; CHECK2-NEXT: entry:
 ; CHECK2-NEXT: [[A1:%.*]] = alloca i32, align 4
 ; CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
-; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]]
+; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]]
 ; CHECK2-NEXT: store i32 1, ptr [[A1]], align 4
 ; CHECK2-NEXT: store ptr [[A1]], ptr [[DOTOMP_REDUCTION_RED_LIST]], align 8
 ; CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
@@ -609,12 +609,12 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.
 ; CHECK2-NEXT: [[TMP8:%.*]] = atomicrmw add ptr [[A]], i32 [[TMP7]] monotonic, align 4
 ; CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
 ; CHECK2: .omp.reduction.default:
-; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A1]])
+; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A1]])
 ; CHECK2-NEXT: ret void
 entry:
 %a1 = alloca i32, align 4
 %.omp.reduction.red_list = alloca [1 x ptr], align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a1)
 store i32 1, ptr %a1, align 4
 store ptr %a1, ptr %.omp.reduction.red_list, align 8
 %tmp2 = load i32, ptr %.global_tid., align 4
@@ -638,7 +638,7 @@ entry:
 br label %.omp.reduction.default
 .omp.reduction.default: ; preds = %.omp.reduction.case2, %.omp.reduction.case1, %entry
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %a1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a1)
 ret void
 }
@@ -696,7 +696,7 @@ declare i32 @__kmpc_reduce_nowait(ptr, i32, i32, i64, ptr, ptr, ptr)
 declare void @__kmpc_end_reduce_nowait(ptr, i32, ptr)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 declare !callback !2 void @__kmpc_fork_call(ptr, i32, ptr, ...)
diff --git a/llvm/test/Transforms/OpenMP/parallel_region_merging.ll b/llvm/test/Transforms/OpenMP/parallel_region_merging.ll
index d587b9a..83452e7 100644
--- a/llvm/test/Transforms/OpenMP/parallel_region_merging.ll
+++ b/llvm/test/Transforms/OpenMP/parallel_region_merging.ll
@@ -433,11 +433,11 @@ entry:
 %b = alloca i32, align 4
 store i32 %a, ptr %a.addr, align 4
 call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 1, ptr @.omp_outlined..14, ptr nonnull %a.addr)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
 %0 = ptrtoint ptr %b to i64
 %1 = trunc i64 %0 to i32
 store i32 %1, ptr %b, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
 call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 1, ptr @.omp_outlined..15, ptr nonnull %a.addr)
 ret void
 }
@@ -449,9 +449,9 @@ entry:
 ret void
 }
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 define internal void @.omp_outlined..15(ptr noalias nocapture readnone %.global_tid., ptr noalias nocapture readnone %.bound_tid., ptr nocapture nonnull readonly align 4 dereferenceable(4) %a) {
 entry:
@@ -466,12 +466,12 @@ entry:
 %b = alloca i32, align 4
 store i32 %a, ptr %a.addr, align 4
 call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 1, ptr @.omp_outlined..16, ptr nonnull %a.addr)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
 %0 = load i32, ptr %a.addr, align 4
 %add = add nsw i32 %0, 1
 store i32 %add, ptr %b, align 4
 call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 2, ptr @.omp_outlined..17, ptr nonnull %a.addr, ptr nonnull %b)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
 ret void
 }
@@ -1184,11 +1184,11 @@ entry:
 ; CHECK: omp_region.body:
 ; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
 ; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
 ; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
 ; CHECK: omp.par.merged.split:
 ; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -1216,7 +1216,7 @@ entry:
 ; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
 ; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
 ; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
 ; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
 ; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
 ; CHECK: omp.par.outlined.exit:
@@ -1224,7 +1224,7 @@ entry:
 ; CHECK: omp.par.exit.split:
 ; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
 ; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: ret void
 ; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
 ; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -2155,11 +2155,11 @@ entry:
 ; CHECK: omp_region.body:
 ; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
 ; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
 ; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
 ; CHECK: omp.par.merged.split:
 ; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -2187,7 +2187,7 @@ entry:
 ; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
 ; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
 ; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
 ; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
 ; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
 ; CHECK: omp.par.outlined.exit:
@@ -2195,7 +2195,7 @@ entry:
 ; CHECK: omp.par.exit.split:
 ; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
 ; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: ret void
 ; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
 ; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -3126,11 +3126,11 @@ entry:
 ; CHECK: omp_region.body:
 ; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
 ; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
 ; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
 ; CHECK: omp.par.merged.split:
 ; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -3158,7 +3158,7 @@ entry:
 ; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
 ; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
 ; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
 ; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
 ; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
 ; CHECK: omp.par.outlined.exit:
@@ -3166,7 +3166,7 @@ entry:
 ; CHECK: omp.par.exit.split:
 ; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
 ; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: ret void
 ; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
 ; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -4097,11 +4097,11 @@ entry:
 ; CHECK: omp_region.body:
 ; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
 ; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
 ; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
 ; CHECK: omp.par.merged.split:
 ; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -4129,7 +4129,7 @@ entry:
 ; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
 ; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
 ; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
 ; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
 ; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
 ; CHECK: omp.par.outlined.exit:
@@ -4137,7 +4137,7 @@ entry:
 ; CHECK: omp.par.exit.split:
 ; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
 ; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
 ; CHECK-NEXT: ret void
 ; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
 ; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -5148,11 +5148,11 @@ entry:
 ; CHECK2: omp_region.body:
 ; CHECK2-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
 ; CHECK2: seq.par.merged:
-; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
 ; CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
 ; CHECK2-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
 ; CHECK2-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
 ; CHECK2-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
 ; CHECK2: omp.par.merged.split:
 ; CHECK2-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -5197,13 +5197,13 @@ entry:
 ; CHECK2-NEXT: store ptr [[A_ADDR]], ptr [[GEP_A_ADDR]], align 8
 ; CHECK2-NEXT: [[GEP_B:%.*]] = getelementptr { ptr, ptr, ptr }, ptr [[STRUCTARG]], i32 0, i32 2
 ; CHECK2-NEXT: store ptr [[B]], ptr [[GEP_B]], align 8
-; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
 ; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @merge_seq_par_use..omp_par, ptr [[STRUCTARG]])
 ; CHECK2-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
 ; CHECK2: omp.par.exit:
 ; CHECK2-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
 ; CHECK2: entry.split.split:
-; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
 ; CHECK2-NEXT: ret void
 ;
 ;
diff --git a/llvm/test/Transforms/OpenMP/spmdization.ll b/llvm/test/Transforms/OpenMP/spmdization.ll
index e91f160..0272c41 100644
--- a/llvm/test/Transforms/OpenMP/spmdization.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization.ll
@@ -2922,7 +2922,7 @@ declare void @unknown() #7
 declare void @unknowni32p(ptr) #7
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) #8
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #8
 define weak i32 @__kmpc_target_init(ptr %0, ptr %1) {
 ; AMDGPU-LABEL: define {{[^@]+}}@__kmpc_target_init
@@ -2958,7 +2958,7 @@ declare void @__kmpc_get_shared_variables(ptr)
 declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) #9
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none)) #8
+declare void @llvm.lifetime.end.p0(ptr captures(none)) #8
 ; Function Attrs: convergent
 declare void @spmd_amenable() #6
diff --git a/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll b/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
index 70c0d04..5a7d097 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
@@ -61,7 +61,7 @@ common.ret: ; preds = %user_code.entry, %e
 }
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p5(i64 immarg, ptr addrspace(5) captures(none)) #1
+declare void @llvm.lifetime.start.p5(ptr addrspace(5) captures(none)) #1
 ; Function Attrs: alwaysinline mustprogress nofree norecurse nosync nounwind willreturn memory(none)
 define internal void @__omp_outlined__(ptr noalias captures(none) %.global_tid., ptr noalias captures(none) %.bound_tid., ptr nonnull align 4 captures(none) %ng, ptr nonnull align 8 captures(none) %aa) #2 {
@@ -72,7 +72,7 @@ entry:
 }
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p5(i64 immarg, ptr addrspace(5) captures(none)) #1
+declare void @llvm.lifetime.end.p5(ptr addrspace(5) captures(none)) #1
 ; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
 define internal void @__omp_outlined___wrapper(i16 zeroext %0, i32 noundef %1) #3 {
diff --git a/llvm/test/Transforms/OpenMP/spmdization_indirect.ll b/llvm/test/Transforms/OpenMP/spmdization_indirect.ll
index 3c3e1d7..d1e006a 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_indirect.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_indirect.ll
@@ -1017,7 +1017,7 @@ declare void @unknown() #5
 declare void @unknowni32p(ptr) #5
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) #6
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #6
 define weak i32 @__kmpc_target_init(ptr %0, ptr %1) {
 ; AMDGPU-LABEL: define {{[^@]+}}@__kmpc_target_init
@@ -1037,7 +1037,7 @@ declare void @__kmpc_get_shared_variables(ptr)
 declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) #7
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none)) #6
+declare void @llvm.lifetime.end.p0(ptr captures(none)) #6
 ; Function Attrs: convergent
 declare void @spmd_amenable() #4
diff --git a/llvm/test/Transforms/OpenMP/spmdization_remarks.ll b/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
index ef36937b..f30e827 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
@@ -75,10 +75,10 @@ common.ret: ; preds = %entry, %user_code.e
 user_code.entry: ; preds = %entry
 %1 = call i32 @__kmpc_global_thread_num(ptr nonnull @3) #3
 call void @unknown() #6, !dbg !20
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i.i) #3
 %2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
 call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i.i, i64 noundef 0) #3, !dbg !23
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26
 call void @unknown() #6, !dbg !27
 call void @__kmpc_target_deinit() #3, !dbg !28
 br label %common.ret
@@ -119,18 +119,18 @@ common.ret: ; preds = %entry, %user_code.e
 user_code.entry: ; preds = %entry
 %1 = call i32 @__kmpc_global_thread_num(ptr nonnull @9) #3
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
 %2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
 call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !35
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
 %3 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
 call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %3, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !40
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
 %4 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
 call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %4, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !43
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45
 call void @spmd_amenable()
 call void @__kmpc_target_deinit() #3, !dbg !46
 br label %common.ret
@@ -157,10 +157,10 @@ declare void @__kmpc_get_shared_variables(ptr) local_unnamed_addr
 declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) local_unnamed_addr
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.start.p0(ptr nocapture) #5
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.end.p0(ptr nocapture) #5
 declare void @spmd_amenable() #7
diff --git a/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll b/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll
index a7337d0..014f95f 100644
--- a/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll
+++ b/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll
@@ -48,11 +48,11 @@ for.end6:
 ret void
 }
-declare void @llvm.lifetime.start(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
 declare i32 @memcmp(ptr, ptr, i64)
 declare i32 @bcmp(ptr, ptr, i64)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
diff --git a/llvm/test/Transforms/PGOProfile/entry_alloca.ll b/llvm/test/Transforms/PGOProfile/entry_alloca.ll
index 580f055..c791e1d 100644
--- a/llvm/test/Transforms/PGOProfile/entry_alloca.ll
+++ b/llvm/test/Transforms/PGOProfile/entry_alloca.ll
@@ -19,8 +19,8 @@ define dso_local double @foo() {
 %1 = alloca %struct.A, align 4
 %2 = alloca %struct.B, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1)
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %2)
 call void @bar(ptr noundef nonnull %1, ptr noundef nonnull %2)
 %3 = load i32, ptr %1, align 4
 %4 = icmp sgt i32 %3, 0
@@ -48,8 +48,8 @@ define dso_local double @foo() {
 21:
 %22 = phi double [ 0.000000e+00, %0 ], [ %18, %9 ]
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %2)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
 ret double %22
 }
diff --git a/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll b/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll
index 3ef185a..9122454 100644
--- a/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll
+++ b/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll
@@ -56,11 +56,11 @@ for.end6:
 ret void
 }
-declare void @llvm.lifetime.start(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
 declare i32 @memcmp(ptr, ptr, i64)
 declare i32 @bcmp(ptr, ptr, i64)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
diff --git a/llvm/test/Transforms/PGOProfile/memop_size_opt.ll b/llvm/test/Transforms/PGOProfile/memop_size_opt.ll
index c4f749b..f63989a 100644
--- a/llvm/test/Transforms/PGOProfile/memop_size_opt.ll
+++ b/llvm/test/Transforms/PGOProfile/memop_size_opt.ll
@@ -181,14 +181,14 @@ for.end6:
 !30 = !{!"VP", i32 1, i64 556, i64 0, i64 99, i64 2, i64 88, i64 3, i64 77, i64 9, i64 72, i64 4, i64 66, i64 5, i64 55, i64 6, i64 44, i64 7, i64 33, i64 8, i64 22}
 !31 = !{!"VP", i32 1, i64 556, i64 0, i64 99, i64 2, i64 88, i64 3, i64 77, i64 9, i64 72, i64 4, i64 66, i64 5, i64 55, i64 6, i64 44, i64 7, i64 33, i64 8, i64 22}
-declare void @llvm.lifetime.start(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
 declare i32 @memcmp(ptr, ptr, i64)
 declare i32 @bcmp(ptr, ptr, i64)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
 ; YAML: --- !Passed
 ; YAML-NEXT: Pass: pgo-memop-opt
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
index f187d41..0a1efda 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
@@ -23,10 +23,10 @@ define i32 @bar() #0 {
 entry:
 %rando = alloca i32, align 4
 %x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.start.p0(ptr %rando) #4
 %call = call i32 (...) @buzz()
 store i32 %call, ptr %rando, align 4, !tbaa !3
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4
+ call void @llvm.lifetime.start.p0(ptr %x) #4
 store i32 0, ptr %x, align 4, !tbaa !3
 %0 = load i32, ptr %rando, align 4, !tbaa !3
 %rem = srem i32 %0, 200000
@@ -52,13 +52,13 @@ if.else: ; preds = %entry
 if.end: ; preds = %if.else, %if.then
 %2 = load i32, ptr %x, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.end.p0(ptr %x) #4
+ call void @llvm.lifetime.end.p0(ptr %rando) #4
 ret i32 %2
 }
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 declare i32 @buzz(...) #2
@@ -70,7 +70,7 @@ declare i32 @baz(i32) #2
 declare i32 @foo(i32) #2
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
index 146ad44..68b233e 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
@@ -20,10 +20,10 @@ define i32 @bar() #0 !dbg !6 {
 entry:
 %rando = alloca i32, align 4
 %x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4, !dbg !9
+ call void @llvm.lifetime.start.p0(ptr %rando) #4, !dbg !9
 %call = call i32 (...) @buzz(), !dbg !9
 store i32 %call, ptr %rando, align 4, !dbg !9, !tbaa !10
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4, !dbg !14
+ call void @llvm.lifetime.start.p0(ptr %x) #4, !dbg !14
 store i32 0, ptr %x, align 4, !dbg !14, !tbaa !10
 %0 = load i32, ptr %rando, align 4, !dbg !15, !tbaa !10
 %rem = srem i32 %0, 200000, !dbg !15
@@ -49,13 +49,13 @@ if.else: ; preds = %entry
 if.end: ; preds = %if.else, %if.then
 %2 = load i32, ptr %x, align 4, !dbg !19, !tbaa !10
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4, !dbg !20
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %x) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %rando) #4, !dbg !20
 ret i32 %2, !dbg !19
 }
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 declare i32 @buzz(...) #2
@@ -67,7 +67,7 @@ declare i32 @baz(i32) #2
 declare i32 @foo(i32) #2
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
index 18677b7..2f188f5 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
@@ -33,10 +33,10 @@ define i32 @bar() #0 {
 entry:
 %rando = alloca i32, align 4
 %x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.start.p0(ptr %rando) #4
 %call = call i32 (...) @buzz()
 store i32 %call, ptr %rando, align 4, !tbaa !3
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4
+ call void @llvm.lifetime.start.p0(ptr %x) #4
 store i32 0, ptr %x, align 4, !tbaa !3
 %0 = load i32, ptr %rando, align 4, !tbaa !3
 %rem = srem i32 %0, 200000
@@ -62,13 +62,13 @@ if.else: ; preds = %entry
 if.end: ; preds = %if.else, %if.then
 %2 = load i32, ptr %x, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.end.p0(ptr %x) #4
+ call void @llvm.lifetime.end.p0(ptr %rando) #4
 ret i32 %2
 }
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 declare i32 @buzz(...) #2
@@ -80,7 +80,7 @@ declare i32 @baz(i32) #2
 declare i32 @foo(i32) #2
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
index 1e7e8c1..4add781 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
@@ -21,10 +21,10 @@ define i32 @bar() #0 {
 entry:
 %rando = alloca i32, align 4
 %x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #3
+ call void @llvm.lifetime.start.p0(ptr %rando) #3
 %call = call i32 (...) @buzz()
 store i32 %call, ptr %rando, align 4, !tbaa !2
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+ call void @llvm.lifetime.start.p0(ptr %x) #3
 store i32 0, ptr %x, align 4, !tbaa !2
 %0 = load i32, ptr %rando, align 4, !tbaa !2
 %rem = srem i32 %0, 200000
@@ -49,13 +49,13 @@ if.else: ; preds = %entry
 if.end: ; preds = %if.else, %if.then
 %2 = load i32, ptr %x, align 4, !tbaa !2
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #3
+ call void @llvm.lifetime.end.p0(ptr %x) #3
+ call void @llvm.lifetime.end.p0(ptr %rando) #3
 ret i32 %2
 }
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 declare i32 @buzz(...) #2
@@ -64,7 +64,7 @@ declare i32 @baz(i32) #2
 declare i32 @foo(i32) #2
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
index 10c3718..5a7731b 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
@@ -37,10 +37,10 @@ define i32 @bar() #0 !dbg !6 {
 entry:
 %rando = alloca i32, align 4
 %x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4, !dbg !9
+ call void @llvm.lifetime.start.p0(ptr %rando) #4, !dbg !9
 %call = call i32 (...) @buzz(), !dbg !9
 store i32 %call, ptr %rando, align 4, !dbg !9, !tbaa !10
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4, !dbg !14
+ call void @llvm.lifetime.start.p0(ptr %x) #4, !dbg !14
 store i32 0, ptr %x, align 4, !dbg !14, !tbaa !10
 %0 = load i32, ptr %rando, align 4, !dbg !15, !tbaa !10
 %rem = srem i32 %0, 200000, !dbg !15
@@ -66,13 +66,13 @@ if.else: ; preds = %entry
 if.end: ; preds = %if.else, %if.then
 %2 = load i32, ptr %x, align 4, !dbg !19, !tbaa !10
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4, !dbg !20
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %x) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %rando) #4, !dbg !20
 ret i32 %2, !dbg !19
 }
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 declare i32 @buzz(...) #2
@@ -84,7 +84,7 @@ declare i32 @baz(i32) #2
 declare i32 @foo(i32) #2
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll b/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
index 6e21c08..859ba72 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
@@ -40,7 +40,7 @@ target triple = "x86_64-unknown-linux-gnu"
 define dso_local void @init_arry() #0 {
 entry:
 %i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #6
+ call void @llvm.lifetime.start.p0(ptr %i) #6
 store i32 0, ptr %i, align 4, !tbaa !4
 br label %for.cond
@@ -65,12 +65,12 @@ for.inc: ; preds = %for.body
 br label %for.cond
 for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #6
+ call void @llvm.lifetime.end.p0(ptr %i) #6
 ret void
 }
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 ; Function Attrs: nounwind readnone speculatable willreturn
 declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
@@ -79,7 +79,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
 declare dso_local i32 @rand() #3
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 ; Function Attrs: nounwind uwtable
 define dso_local i32 @main() #0 {
@@ -90,9 +90,9 @@ entry:
 %condition = alloca i32, align 4
 store i32 0, ptr %retval, align 4
 call void @init_arry()
- call void @llvm.lifetime.start.p0(i64 4, ptr %val) #6
+ call void @llvm.lifetime.start.p0(ptr %val) #6
 store i32 0, ptr %val, align 4, !tbaa !4
- call void @llvm.lifetime.start.p0(i64 4, ptr %j) #6
+ call void @llvm.lifetime.start.p0(ptr %j) #6
 store i32 0, ptr %j, align 4, !tbaa !4
 br label %for.cond
@@ -102,7 +102,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.end
 for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %condition) #6
+ call void @llvm.lifetime.start.p0(ptr %condition) #6
 %call = call i32 @rand() #6
 %rem = srem i32 %call, 5
 store i32 %rem, ptr %condition, align 4, !tbaa !4
@@ -138,7 +138,7 @@ sw.default: ; preds = %for.body
 unreachable
 sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb
- call void @llvm.lifetime.end.p0(i64 4, ptr %condition) #6
+ call void @llvm.lifetime.end.p0(ptr %condition) #6
 br label %for.inc
 for.inc: ; preds = %sw.epilog
@@ -148,8 +148,8 @@ for.inc: ; preds = %sw.epilog
 br label %for.cond
 for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %j) #6
- call void @llvm.lifetime.end.p0(i64 4, ptr %val) #6
+ call void @llvm.lifetime.end.p0(ptr %j) #6
+ call void @llvm.lifetime.end.p0(ptr %val) #6
 ret i32 0
 }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-switch.ll b/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
index ebecee1..242d5b8 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
@@ -43,7 +43,7 @@ target triple = "x86_64-unknown-linux-gnu"
 define dso_local void @init_arry() #0 !dbg !21 {
 entry:
 %i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #6, !dbg !26
+ call void @llvm.lifetime.start.p0(ptr %i) #6, !dbg !26
 call void @llvm.dbg.declare(metadata ptr %i, metadata !25, metadata !DIExpression()), !dbg !27
 store i32 0, ptr %i, align 4, !dbg !28, !tbaa !30
 br label %for.cond, !dbg !34
@@ -69,12 +69,12 @@ for.inc: ; preds = %for.body
 br label %for.cond, !dbg !47, !llvm.loop !48
 for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #6, !dbg !50
+ call void @llvm.lifetime.end.p0(ptr %i) #6, !dbg !50
 ret void, !dbg !50
 }
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 ; Function Attrs: nounwind readnone speculatable willreturn
 declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
@@ -83,7 +83,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
 declare dso_local i32 @rand() #3
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 ; Function Attrs: nounwind uwtable
 define dso_local i32 @main() #0 !dbg !51 {
@@ -94,10 +94,10 @@ entry:
 %condition = alloca i32, align 4
 store i32 0, ptr %retval, align 4
 call void @init_arry(), !dbg !62
- call void @llvm.lifetime.start.p0(i64 4, ptr %val) #6, !dbg !63
+ call void @llvm.lifetime.start.p0(ptr %val) #6, !dbg !63
 call void @llvm.dbg.declare(metadata ptr %val, metadata !55, metadata !DIExpression()), !dbg !64
 store i32 0, ptr %val, align 4, !dbg !64, !tbaa !30
- call void @llvm.lifetime.start.p0(i64 4, ptr %j) #6, !dbg !65
+ call void @llvm.lifetime.start.p0(ptr %j) #6, !dbg !65
 call void @llvm.dbg.declare(metadata ptr %j, metadata !56, metadata !DIExpression()), !dbg !66
 store i32 0, ptr %j, align 4, !dbg !67, !tbaa !30
 br label %for.cond, !dbg !68
@@ -108,7 +108,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.end, !dbg !71
 for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %condition) #6, !dbg !72
+ call void @llvm.lifetime.start.p0(ptr %condition) #6, !dbg !72
 call void @llvm.dbg.declare(metadata ptr %condition, metadata !57, metadata !DIExpression()), !dbg !73
 %call = call i32 @rand() #6, !dbg !74
 %rem = srem i32 %call, 5, !dbg !75
@@ -145,7 +145,7 @@ sw.default: ; preds = %for.body
 unreachable, !dbg !87
 sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb
- call void @llvm.lifetime.end.p0(i64 4, ptr %condition) #6, !dbg !88
+ call void @llvm.lifetime.end.p0(ptr %condition) #6, !dbg !88
 br label %for.inc, !dbg !89
 for.inc: ; preds = %sw.epilog
@@ -155,8 +155,8 @@ for.inc: ; preds = %sw.epilog
 br label %for.cond, !dbg !91, !llvm.loop !92
 for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %j) #6, !dbg !94
- call void @llvm.lifetime.end.p0(i64 4, ptr %val) #6, !dbg !94
+ call void @llvm.lifetime.end.p0(ptr %j) #6, !dbg !94
+ call void @llvm.lifetime.end.p0(ptr %val) #6, !dbg !94
 ret i32 0, !dbg !95
 }
diff --git a/llvm/test/Transforms/PartialInlining/switch_stmt.ll b/llvm/test/Transforms/PartialInlining/switch_stmt.ll
index 3f43369..d99fbba 100644
--- a/llvm/test/Transforms/PartialInlining/switch_stmt.ll
+++ b/llvm/test/Transforms/PartialInlining/switch_stmt.ll
@@ -62,7 +62,7 @@ define dso_local signext i32 @caller(i32 signext %c) !prof !30 {
 ; CHECK-LABEL: @caller(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[RC_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[RC_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RC_I]])
 ; CHECK-NEXT: store i32 0, ptr [[RC_I]], align 4
 ; CHECK-NEXT: switch i32 [[C:%.*]], label [[SW_DEFAULT_I:%.*]] [
 ; CHECK-NEXT: i32 0, label [[CODEREPL_I:%.*]]
@@ -83,7 +83,7 @@ define dso_local signext i32 @caller(i32 signext %c) !prof !30 {
 ; CHECK-NEXT: br label [[CALLEE_1_EXIT]]
 ; CHECK: callee.1.exit:
 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[RC_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[RC_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RC_I]])
 ;
 entry:
 %0 = call signext i32 @callee(i32 signext %c, i32 signext %c)
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
index b4b12da..141503d 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
@@ -68,9 +68,9 @@ entry:
 store ptr %array, ptr %array.addr, align 8
 store i32 %count, ptr %count.addr, align 4
 store i32 %n, ptr %n.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %sum) #3
+ call void @llvm.lifetime.start.p0(ptr %sum) #3
 store i32 0, ptr %sum, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.start.p0(ptr %i) #3
 store i32 0, ptr %i, align 4
 br label %for.cond
@@ -81,7 +81,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.end.p0(ptr %i) #3
 br label %for.end
 for.body: ; preds = %for.cond
@@ -113,7 +113,7 @@ for.inc: ; preds = %if.end
 for.end: ; preds = %for.cond.cleanup
 %9 = load i32, ptr %sum, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %sum)
+ call void @llvm.lifetime.end.p0(ptr %sum)
 ret i32 %9
 }
@@ -184,9 +184,9 @@ entry:
 %1 = getelementptr inbounds { ptr, i64 }, ptr %s, i32 0, i32 1
 store i64 %s.coerce1, ptr %1, align 8
 store i64 %n, ptr %n.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %ret) #7
+ call void @llvm.lifetime.start.p0(ptr %ret) #7
 store i32 0, ptr %ret, align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr %i) #7
+ call void @llvm.lifetime.start.p0(ptr %i) #7
 store i64 0, ptr %i, align 8
 br label %for.cond
@@ -197,7 +197,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 8, ptr %i) #7
+ call void @llvm.lifetime.end.p0(ptr %i) #7
 br label %for.end
 for.body: ; preds = %for.cond
@@ -217,7 +217,7 @@ for.inc: ; preds = %for.body
 for.end: ; preds = %for.cond.cleanup
 %8 = load i32, ptr %ret, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %ret)
+ call void @llvm.lifetime.end.p0(ptr %ret)
 ret i32 %8
 }
@@ -283,11 +283,11 @@ entry:
 ret i64 %0
 }
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
 declare void @llvm.trap()
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 ;.
 ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
 ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
index f583a61..e74bf592 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
@@ -89,7 +89,7 @@ entry:
 %i = alloca i32, align 4
 store ptr %X, ptr %X.addr, align 8
 store ptr %Y, ptr %Y.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
 store i32 0, ptr %i, align 4
 br label %for.cond
@@ -99,7 +99,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
 br label %for.end
 for.body: ; preds = %for.cond
@@ -237,6 +237,6 @@ exit:
 ret void
 }
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
index 089511d..9679806 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
@@ -408,7 +408,7 @@ entry:
 store i32 %i, ptr %i.addr, align 4
 store ptr %A, ptr %A.addr, align 8
 store ptr %B, ptr %B.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %j) #3
+ call void @llvm.lifetime.start.p0(ptr %j) #3
 store i32 0, ptr %j, align 4
 br label %for.cond
@@ -419,11 +419,11 @@ for.cond: ; preds = %for.inc12, %entry
 for.cond.cleanup: ; preds = %for.cond
 store i32 2, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %j) #3
+ call void @llvm.lifetime.end.p0(ptr %j) #3
 br label %for.end14
 for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %k) #3
+ call void @llvm.lifetime.start.p0(ptr %k) #3
 store i32 0, ptr %k, align 4
 br label %for.cond1
@@ -435,7 +435,7 @@ for.cond1: ; preds = %for.inc, %for.body
 for.cond.cleanup3: ; preds = %for.cond1
 store i32 5, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %k) #3
+ call void @llvm.lifetime.end.p0(ptr %k) #3
 br label %for.end
 for.body4: ; preds = %for.cond1
@@ -501,13 +501,13 @@ for.end14: ; preds = %for.cond.cleanup
 }
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 ; Function Attrs: inaccessiblememonly nofree nosync nounwind willreturn
 declare void @llvm.assume(i1 noundef) #2
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 ; Function Attrs: nounwind ssp uwtable mustprogress
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
index c7098d2..e8709a5 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
@@ -108,13 +108,13 @@ entry:
 store ptr %samples, ptr %samples.addr, align 8
 store double %Y, ptr %Y.addr, align 8
 store double %Z, ptr %Z.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %block) #2
- call void @llvm.lifetime.start.p0(i64 8, ptr %rngVal) #2
- call void @llvm.lifetime.start.p0(i64 8, ptr %callValue) #2
- call void @llvm.lifetime.start.p0(i64 8, ptr %v0) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %block) #2
+ call void @llvm.lifetime.start.p0(ptr %rngVal) #2
+ call void @llvm.lifetime.start.p0(ptr %callValue) #2
+ call void @llvm.lifetime.start.p0(ptr %v0) #2
 store double 0.000000e+00, ptr %v0, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %v1) #2
+ call void @llvm.lifetime.start.p0(ptr %v1) #2
 store double 0.000000e+00, ptr %v1, align 8
 store i32 0, ptr %i, align 4
 br label %for.cond
@@ -169,12 +169,12 @@ for.end: ; preds = %for.cond
 %15 = load double, ptr %v0, align 8
 %16 = load double, ptr %v1, align 8
 %add5 = fadd fast double %15, %16
- call void @llvm.lifetime.end.p0(i64 8, ptr %v1) #2
- call void @llvm.lifetime.end.p0(i64 8, ptr %v0) #2
- call void @llvm.lifetime.end.p0(i64 8, ptr %callValue) #2
- call void @llvm.lifetime.end.p0(i64 8, ptr %rngVal) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %block) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %v1) #2
+ call void @llvm.lifetime.end.p0(ptr %v0) #2
+ call void @llvm.lifetime.end.p0(ptr %callValue) #2
+ call void @llvm.lifetime.end.p0(ptr %rngVal) #2
+ call void @llvm.lifetime.end.p0(ptr %block) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
 ret double %add5
 }
@@ -305,13 +305,13 @@ entry:
 store ptr %samples, ptr %samples.addr, align 8
 store double %Y, ptr %Y.addr, align 8
 store double %Z, ptr %Z.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #4
- call void @llvm.lifetime.start.p0(i64 4, ptr %block) #4
- call void @llvm.lifetime.start.p0(i64 8, ptr %rngVal) #4
- call void @llvm.lifetime.start.p0(i64 8, ptr %callValue) #4
- call void @llvm.lifetime.start.p0(i64 8, ptr %v0) #4
+ call void @llvm.lifetime.start.p0(ptr %i) #4
+ call void @llvm.lifetime.start.p0(ptr %block) #4
+ call void @llvm.lifetime.start.p0(ptr %rngVal) #4
+ call void @llvm.lifetime.start.p0(ptr %callValue) #4
+ call void @llvm.lifetime.start.p0(ptr %v0) #4
 store double 0.000000e+00, ptr %v0, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %v1) #4
+ call void @llvm.lifetime.start.p0(ptr %v1) #4
 store double 0.000000e+00, ptr %v1, align 8
 store i32 0, ptr %block, align 4
 br label %for.cond
@@ -389,19 +389,19 @@ for.end10: ; preds = %for.cond
 %21 = load double, ptr %v0, align 8
 %22 = load double, ptr %v1, align 8
 %add11 = fadd fast double %21, %22
- call void @llvm.lifetime.end.p0(i64 8, ptr %v1) #4
- call void @llvm.lifetime.end.p0(i64 8, ptr %v0) #4
- call void @llvm.lifetime.end.p0(i64 8, ptr %callValue) #4
- call void @llvm.lifetime.end.p0(i64 8, ptr %rngVal) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %block) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #4
+ call void @llvm.lifetime.end.p0(ptr %v1) #4
+ call void @llvm.lifetime.end.p0(ptr %v0) #4
+ call void @llvm.lifetime.end.p0(ptr %callValue) #4
+ call void @llvm.lifetime.end.p0(ptr %rngVal) #4
+ call void @llvm.lifetime.end.p0(ptr %block) #4
+ call void @llvm.lifetime.end.p0(ptr %i) #4
 ret double %add11
 }
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
 declare void @resample(i32 noundef, ptr noundef)
 declare double @llvm.exp2.f64(double)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
 ;.
 ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
 ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
index d55559d..258ef63 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
@@ -525,9 +525,9 @@ entry:
 store ptr %dct, ptr %dct.addr, align 8
 store ptr %mf, ptr %mf.addr, align 8
 store ptr %bias, ptr %bias.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %nz) #2
+ call void @llvm.lifetime.start.p0(ptr %nz) #2
 store i32 0, ptr %nz, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
 store i32 0, ptr %i, align 4
 br label %for.cond
@@ -537,7 +537,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
 br label %for.end
 for.body: ; preds = %for.cond
@@ -636,13 +636,13 @@ for.end: ; preds = %for.cond.cleanup
 %lnot = xor i1 %tobool, true
 %lnot34 = xor i1 %lnot, true
 %lnot.ext = zext i1 %lnot34 to i32
- call void @llvm.lifetime.end.p0(i64 4, ptr %nz) #2
+ call void @llvm.lifetime.end.p0(ptr %nz) #2
 ret i32 %lnot.ext
 }
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll
index a201983..d19242f 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll
@@ -136,14 +136,14 @@ entry:
 store i32 %ip1, ptr %ip1.addr, align 4, !tbaa !8
 store ptr %p2, ptr %p2.addr, align 8, !tbaa !4
 store i32 %ip2, ptr %ip2.addr, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 64, ptr %emp) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r0) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r1) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r2) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r3) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %sum) #2
+ call void @llvm.lifetime.start.p0(ptr %emp) #2
+ call void @llvm.lifetime.start.p0(ptr %r0) #2
+ call void @llvm.lifetime.start.p0(ptr %r1) #2
+ call void @llvm.lifetime.start.p0(ptr %r2) #2
+ call void @llvm.lifetime.start.p0(ptr %r3) #2
+ call void @llvm.lifetime.start.p0(ptr %sum) #2
 store i32 0, ptr %sum, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
 store i32 0, ptr %i, align 4, !tbaa !8
 br label %for.cond
@@ -153,7 +153,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
 br label %for.end
 for.body: ; preds = %for.cond
@@ -241,22 +241,22 @@ for.body: ; preds = %for.cond
 %shl42 = shl i32 %sub41, 16
 %rdd43 = add nsw i32 %sub36, %shl42
 store i32 %rdd43, ptr %r3, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e0) #2
+ call void @llvm.lifetime.start.p0(ptr %e0) #2
 %33 = load i32, ptr %r0, align 4, !tbaa !8
 %34 = load i32, ptr %r1, align 4, !tbaa !8
 %rdd44 = add i32 %33, %34
 store i32 %rdd44, ptr %e0, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e1) #2
+ call void @llvm.lifetime.start.p0(ptr %e1) #2
 %35 = load i32, ptr %r0, align 4, !tbaa !8
 %36 = load i32, ptr %r1, align 4, !tbaa !8
 %sub45 = sub i32 %35, %36
 store i32 %sub45, ptr %e1, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e2) #2
+ call void @llvm.lifetime.start.p0(ptr %e2) #2
 %37 = load i32, ptr %r2, align 4, !tbaa !8
 %38 = load i32, ptr %r3, align 4, !tbaa !8
 %rdd46 = add i32 %37, %38
 store i32 %rdd46, ptr %e2, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e3) #2
+ call void @llvm.lifetime.start.p0(ptr %e3) #2
 %39 = load i32, ptr %r2, align 4, !tbaa !8
 %40 = load i32, ptr %r3, align 4, !tbaa !8
 %sub47 = sub i32 %39, %40
@@ -293,10 +293,10 @@ for.body: ; preds = %for.cond
 %rrrayidx61 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 %idxprom60
 %rrrayidx62 = getelementptr inbounds [4 x i32], ptr %rrrayidx61, i64 0, i64 3
 store i32 %sub59, ptr %rrrayidx62, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 4, ptr %e3) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e2) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e1) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e0) #2
+ call void @llvm.lifetime.end.p0(ptr %e3) #2
+ call void @llvm.lifetime.end.p0(ptr %e2) #2
+ call void @llvm.lifetime.end.p0(ptr %e1) #2
+ call void @llvm.lifetime.end.p0(ptr %e0) #2
 br label %for.inc
 for.inc: ; preds = %for.body
@@ -316,7 +316,7 @@ for.inc: ; preds = %for.body
 br label %for.cond, !llvm.loop !11
 for.end: ; preds = %for.cond.cleanup
- call void @llvm.lifetime.start.p0(i64 4, ptr %i65) #2
+ call void @llvm.lifetime.start.p0(ptr %i65) #2
 store i32 0, ptr %i65, align 4, !tbaa !8
 br label %for.cond66
@@ -326,11 +326,11 @@ for.cond66: ; preds = %for.inc114, %for.en
 br i1 %cmp67, label %for.body70, label %for.cond.cleanup69
 for.cond.cleanup69: ; preds = %for.cond66
- call void @llvm.lifetime.end.p0(i64 4, ptr %i65) #2
+ call void @llvm.lifetime.end.p0(ptr %i65) #2
 br label %for.end116
 for.body70: ; preds = %for.cond66
- call void @llvm.lifetime.start.p0(i64 4, ptr %e071) #2
+ call void @llvm.lifetime.start.p0(ptr %e071) #2
 %rrrayidx72 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 0
 %59 = load i32, ptr %i65, align 4, !tbaa !8
 %idxprom73 = sext i32 %59 to i64
@@ -343,7 +343,7 @@ for.body70: ; preds = %for.cond66
 %62 = load i32, ptr %rrrayidx77, align 4, !tbaa !8
 %rdd78 = add i32 %60, %62
 store i32 %rdd78, ptr %e071, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e179) #2
+ call void @llvm.lifetime.start.p0(ptr %e179) #2
 %rrrayidx80 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 0
 %63 = load i32, ptr %i65, align 4, !tbaa !8
 %idxprom81 = sext i32 %63 to i64
@@ -356,7 +356,7 @@ for.body70: ; preds = %for.cond66
 %66 = load i32, ptr %rrrayidx85, align 4, !tbaa !8
 %sub86 = sub i32 %64, %66
 store i32 %sub86, ptr %e179, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e287) #2
+ call void @llvm.lifetime.start.p0(ptr %e287) #2
 %rrrayidx88 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 2
 %67 = load i32, ptr %i65, align 4, !tbaa !8
 %idxprom89 = sext i32 %67 to i64
@@ -369,7 +369,7 @@ for.body70: ; preds = %for.cond66
 %70 = load i32, ptr %rrrayidx93, align 4, !tbaa !8
 %rdd94 = add i32 %68, %70
 store i32 %rdd94, ptr %e287, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e395) #2
+ call void @llvm.lifetime.start.p0(ptr %e395) #2
 %rrrayidx96 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 2
 %71 = load i32, ptr %i65, align 4, !tbaa !8
 %idxprom97 = sext i32 %71 to i64
@@ -398,10 +398,10 @@ for.body70: ; preds = %for.cond66
 %82 = load i32, ptr %e395, align 4, !tbaa !8
 %sub106 = sub nsw i32 %81, %82
 store i32 %sub106, ptr %r3, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 4, ptr %e395) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e287) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e179) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e071) #2
+ call void @llvm.lifetime.end.p0(ptr %e395) #2
+ call void @llvm.lifetime.end.p0(ptr %e287) #2
+ call void @llvm.lifetime.end.p0(ptr %e179) #2
+ call void @llvm.lifetime.end.p0(ptr %e071) #2
 %83 = load i32, ptr %r0, align 4, !tbaa !8
 %call = call i32 @twoabs(i32 noundef %83)
 %84 = load i32, ptr %r1, align 4, !tbaa !8
@@ -432,20 +432,20 @@ for.end116: ; preds = %for.cond.cleanup69
 %shr = lshr i32 %90, 16
 %rdd119 = add i32 %conv118, %shr
 %shr120 = lshr i32 %rdd119, 1
- call void @llvm.lifetime.end.p0(i64 4, ptr %sum) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r3) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r2) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r1) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r0) #2
- call void @llvm.lifetime.end.p0(i64 64, ptr %emp) #2
+ call void @llvm.lifetime.end.p0(ptr %sum) #2
+ call void @llvm.lifetime.end.p0(ptr %r3) #2
+ call void @llvm.lifetime.end.p0(ptr %r2) #2
+ call void @llvm.lifetime.end.p0(ptr %r1) #2
+ call void @llvm.lifetime.end.p0(ptr %r0) #2
+ call void @llvm.lifetime.end.p0(ptr %emp) #2
 ret i32 %shr120
 }
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 ; Function Attrs: nounwind uwtable
 define internal i32 @twoabs(i32 noundef %r) #0 {
@@ -453,7 +453,7 @@ entry:
 %r.addr = alloca i32, align 4
 %s = alloca i32, align 4
 store i32 %r, ptr %r.addr, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %s) #2
+ call void @llvm.lifetime.start.p0(ptr %s) #2
 %0 = load i32, ptr %r.addr, align 4, !tbaa !8
 %shr = lshr i32 %0, 15
 %rnd = and i32 %shr, 65537
@@ -464,7 +464,7 @@ entry:
 %rdd = add i32 %1, %2
 %3 = load i32, ptr %s, align 4, !tbaa !8
 %xor = xor i32 %rdd, %3
- call void @llvm.lifetime.end.p0(i64 4, ptr %s) #2
+ call void @llvm.lifetime.end.p0(ptr %s) #2
 ret i32 %xor
 }
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/sve-interleave-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/sve-interleave-vectorization.ll
index f40afbd..ff085fc 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/sve-interleave-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/sve-interleave-vectorization.ll
@@ -24,8 +24,6 @@ define void @interleave_deinterleave(ptr noalias %dst, ptr %a, ptr %b) {
 ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -47,7 +45,7 @@ define void @interleave_deinterleave(ptr noalias %dst, ptr %a, ptr %b) {
 ; CHECK-NEXT: [[TMP23:%.*]] = shl <vscale x 4 x i32> [[TMP11]], [[TMP18]]
 ; CHECK-NEXT: [[TMP24:%.*]] = ashr <vscale x 4 x i32> [[TMP12]], [[TMP19]]
 ; CHECK-NEXT: call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x i32> [[TMP22]], <vscale x 4 x i32> [[TMP23]], <vscale x 4 x i32> [[TMP24]], <vscale x 4 x i1> splat (i1 true), ptr [[TMP21]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
 ; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: middle.block:
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll
index 3496520..0967736 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll
@@ -379,9 +379,9 @@ entry:
 store i32 %s_p1, ptr %s_p1.addr, align 4, !tbaa !9
 store ptr %p2, ptr %p2.addr, align 8, !tbaa !4
 store i32 %s_p2, ptr %s_p2.addr, align 4, !tbaa !9
- call void @llvm.lifetime.start.p0(i64 4, ptr %i_sum) #3
+ call void @llvm.lifetime.start.p0(ptr %i_sum) #3
 store i32 0, ptr %i_sum, align 4, !tbaa !9
- call void @llvm.lifetime.start.p0(i64 4, ptr %y) #3
+ call void @llvm.lifetime.start.p0(ptr %y) #3
 store i32 0, ptr %y, align 4, !tbaa !9
 br label %for.cond
@@ -392,11 +392,11 @@ for.cond: ; preds = %for.inc10, %entry
 for.cond.cleanup: ; preds = %for.cond
 store i32 2, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %y) #3
+ call void @llvm.lifetime.end.p0(ptr %y) #3
 br label %for.end12
 for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+ call void @llvm.lifetime.start.p0(ptr %x) #3
 store i32 0, ptr %x, align 4, !tbaa !9
 br
label %for.cond1 @@ -407,7 +407,7 @@ for.cond1: ; preds = %for.inc, %for.body for.cond.cleanup3: ; preds = %for.cond1 store i32 5, ptr %cleanup.dest.slot, align 4 - call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3 + call void @llvm.lifetime.end.p0(ptr %x) #3 br label %for.end for.body4: ; preds = %for.cond1 @@ -458,18 +458,18 @@ for.inc10: ; preds = %for.end for.end12: ; preds = %for.cond.cleanup %16 = load i32, ptr %i_sum, align 4, !tbaa !9 store i32 1, ptr %cleanup.dest.slot, align 4 - call void @llvm.lifetime.end.p0(i64 4, ptr %i_sum) #3 + call void @llvm.lifetime.end.p0(ptr %i_sum) #3 ret i32 %16 } ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr) #1 +declare void @llvm.lifetime.start.p0(ptr) #1 ; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none) declare i32 @llvm.abs.i32(i32, i1 immarg) #2 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr) #1 +declare void @llvm.lifetime.end.p0(ptr) #1 attributes #0 = { nounwind uwtable vscale_range(1,16) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+bf16,+bti,+ccidx,+complxnum,+crc,+dit,+dotprod,+ete,+flagm,+fp-armv8,+fp16fml,+fullfp16,+i8mm,+jsconv,+lse,+mte,+neon,+pauth,+perfmon,+predres,+rand,+ras,+rcpc,+rdm,+sb,+spe,+ssbs,+sve,+sve-bitperm,+sve2,+trbe,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+v9a,-fmv" } attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) } diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll index 76d9d14..0023dea 100644 --- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll +++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll @@ -43,7 +43,7 @@ entry: store ptr %pSrcB, ptr %pSrcB.addr, align 4 store ptr %pDst, ptr %pDst.addr, align 4 store i32 %blockSize, ptr %blockSize.addr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt) + call void @llvm.lifetime.start.p0(ptr %blkCnt) %0 = load i32, ptr %blockSize.addr, align 4 store i32 %0, ptr %blkCnt, align 4 br label %while.cond @@ -78,7 +78,7 @@ while.body: ; preds = %while.cond br label %while.cond while.end: ; preds = %while.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt) + call void @llvm.lifetime.end.p0(ptr %blkCnt) ret void } @@ -102,13 +102,13 @@ land.lhs.true: ; preds = %entry br i1 %cmp1, label %if.then, label %if.end10 if.then: ; preds = %land.lhs.true - call void @llvm.lifetime.start.p0(i64 4, ptr %max) + call void @llvm.lifetime.start.p0(ptr %max) %2 = load i32, ptr %sat.addr, align 4 %sub = sub i32 %2, 1 %shl = shl i32 1, %sub %sub2 = sub i32 %shl, 1 store i32 %sub2, ptr %max, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %min) + call void @llvm.lifetime.start.p0(ptr %min) %3 = load i32, ptr %max, align 4 %sub3 = sub nsw i32 -1, %3 store i32 %sub3, ptr %min, align 4 @@ -143,8 +143,8 @@ if.end8: ; preds = %if.end br label %cleanup cleanup: ; preds = %if.end8, %if.then7, %if.then5 - call void @llvm.lifetime.end.p0(i64 4, ptr %min) - call void @llvm.lifetime.end.p0(i64 4, ptr %max) + call void @llvm.lifetime.end.p0(ptr %min) + call void @llvm.lifetime.end.p0(ptr %max) %cleanup.dest = load i32, ptr %cleanup.dest.slot, align 4 switch i32 %cleanup.dest, label %unreachable [ i32 0, label %cleanup.cont @@ -167,8 +167,8 @@ unreachable: 
; preds = %cleanup unreachable } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" } attributes #1 = { alwaysinline nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" } diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll index 2ab6f2b..436f848 100644 --- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll +++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll @@ -59,8 +59,8 @@ entry: store i8 %value, ptr %value.addr, align 1, !tbaa !3 store ptr %pDst, ptr %pDst.addr, align 4, !tbaa !6 store i32 %blockSize, ptr %blockSize.addr, align 4, !tbaa !8 - call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt) #3 - call void @llvm.lifetime.start.p0(i64 4, ptr %packedValue) #3 + call void @llvm.lifetime.start.p0(ptr %blkCnt) #3 + call void @llvm.lifetime.start.p0(ptr %packedValue) #3 %0 = load i8, ptr %value.addr, align 1, !tbaa !3 %conv = sext i8 %0 to i32 %shl = shl i32 %conv, 0 @@ -122,13 +122,13 @@ while.body16: ; preds = %while.cond13 br label %while.cond13, !llvm.loop !12 while.end18: ; preds = %while.cond13 - call void @llvm.lifetime.end.p0(i64 4, ptr %packedValue) #3 - call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt) #3 + call void @llvm.lifetime.end.p0(ptr %packedValue) #3 + call void @llvm.lifetime.end.p0(ptr %blkCnt) #3 ret void } ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 ; Function Attrs: alwaysinline nounwind define internal void @write_q7x4_ia(ptr %pQ7, i32 %value) #2 { @@ -138,7 +138,7 @@ entry: %val = alloca i32, align 4 store ptr %pQ7, ptr %pQ7.addr, align 4, !tbaa !6 store i32 %value, ptr %value.addr, align 4, !tbaa !8 - call void @llvm.lifetime.start.p0(i64 4, ptr %val) #3 + call void @llvm.lifetime.start.p0(ptr %val) #3 %0 = load i32, ptr %value.addr, align 4, !tbaa !8 store i32 %0, ptr %val, align 4, !tbaa !8 %1 = load i32, ptr %val, align 4, !tbaa !8 @@ -175,12 +175,12 @@ entry: %14 = load ptr, ptr %13, align 4, !tbaa !6 %add.ptr = 
getelementptr inbounds i8, ptr %14, i32 4 store ptr %add.ptr, ptr %13, align 4, !tbaa !6 - call void @llvm.lifetime.end.p0(i64 4, ptr %val) #3 + call void @llvm.lifetime.end.p0(ptr %val) #3 ret void } ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { nounwind "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m0plus" "target-features"="+armv6-m,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-dotprod,-dsp,-fp16fml,-fullfp16,-hwdiv,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-ras,-sb,-sha2" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { argmemonly nofree nosync nounwind willreturn } diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll index b932a69..6862d8b 100644 --- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll +++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll @@ -60,9 +60,9 @@ entry: store ptr %pSrc, ptr %pSrc.addr, align 4 store i32 %blockSize, ptr %blockSize.addr, align 4 store ptr %pResult, ptr %pResult.addr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt) #3 - call void @llvm.lifetime.start.p0(i64 16, ptr %vecSrc) #3 - call void @llvm.lifetime.start.p0(i64 4, ptr %sum) #3 + call void @llvm.lifetime.start.p0(ptr %blkCnt) #3 + call void @llvm.lifetime.start.p0(ptr %vecSrc) #3 + call void @llvm.lifetime.start.p0(ptr %sum) #3 store i32 0, ptr %sum, align 4 %0 = load i32, ptr %blockSize.addr, align 4 %shr = lshr i32 %0, 4 @@ -123,15 +123,15 @@ while.end5: ; preds = %while.cond1 %conv6 = trunc i32 %div to i8 %18 = load ptr, ptr %pResult.addr, align 4 store i8 %conv6, ptr %18, align 1 - call void @llvm.lifetime.end.p0(i64 4, ptr %sum) #3 - call void @llvm.lifetime.end.p0(i64 16, ptr %vecSrc) #3 - call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt) #3 + call void @llvm.lifetime.end.p0(ptr %sum) #3 + call void @llvm.lifetime.end.p0(ptr %vecSrc) #3 + call void @llvm.lifetime.end.p0(ptr %blkCnt) #3 ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare i32 @llvm.arm.mve.addv.v16i8(<16 x i8>, i32) #2 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { nounwind "approx-func-fp-math"="true" "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-pacbti,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" } attributes #1 = { argmemonly nocallback nofree nosync nounwind willreturn } diff --git 
a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll index 9d613b8..42fdafb 100644 --- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll +++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll @@ -88,7 +88,7 @@ entry: store ptr %pSrcB, ptr %pSrcB.addr, align 4 store ptr %pDst, ptr %pDst.addr, align 4 store i32 %blockSize, ptr %blockSize.addr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt) #3 + call void @llvm.lifetime.start.p0(ptr %blkCnt) #3 %0 = load i32, ptr %blockSize.addr, align 4 store i32 %0, ptr %blkCnt, align 4 br label %while.cond @@ -123,11 +123,11 @@ while.body: ; preds = %while.cond br label %while.cond while.end: ; preds = %while.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt) #3 + call void @llvm.lifetime.end.p0(ptr %blkCnt) #3 ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 define internal i32 @__SSAT(i32 %val, i32 %sat) #2 { entry: @@ -149,13 +149,13 @@ land.lhs.true: ; preds = %entry br i1 %cmp1, label %if.then, label %if.end10 if.then: ; preds = %land.lhs.true - call void @llvm.lifetime.start.p0(i64 4, ptr %max) #3 + call void @llvm.lifetime.start.p0(ptr %max) #3 %2 = load i32, ptr %sat.addr, align 4 %sub = sub i32 %2, 1 %shl = shl i32 1, %sub %sub2 = sub i32 %shl, 1 store i32 %sub2, ptr %max, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %min) #3 + call void @llvm.lifetime.start.p0(ptr %min) #3 %3 = load i32, ptr %max, align 4 %sub3 = sub nsw i32 -1, %3 store i32 %sub3, ptr %min, align 4 @@ -190,8 +190,8 @@ if.end8: ; preds = %if.end br label %cleanup cleanup: ; preds = %if.end8, %if.then7, %if.then5 - call void @llvm.lifetime.end.p0(i64 4, ptr %min) #3 - call void @llvm.lifetime.end.p0(i64 4, ptr %max) #3 + call void @llvm.lifetime.end.p0(ptr %min) #3 + call void @llvm.lifetime.end.p0(ptr %max) #3 %cleanup.dest = load i32, ptr %cleanup.dest.slot, align 4 switch i32 %cleanup.dest, label %unreachable [ i32 0, label %cleanup.cont @@ -214,7 +214,7 @@ unreachable: ; preds = %cleanup unreachable } -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" } attributes #1 = { argmemonly nofree nosync nounwind willreturn } diff --git a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll index 5178e9f..7fe3f33 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll @@ -60,9 +60,9 @@ bb: %i5 = alloca ptr, align 8 store i32 %arg, ptr %i, align 4, !tbaa !5 store ptr %arg1, ptr %i2, align 8, !tbaa !9 - call void @llvm.lifetime.start.p0(i64 8, ptr %i3) #3 + call 
void @llvm.lifetime.start.p0(ptr %i3) #3 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %i3, ptr align 4 @global, i64 8, i1 false) - call void @llvm.lifetime.start.p0(i64 4, ptr %i4) #3 + call void @llvm.lifetime.start.p0(ptr %i4) #3 store i32 0, ptr %i4, align 4, !tbaa !5 br label %bb6 @@ -75,11 +75,11 @@ bb6: ; preds = %bb22, %bb br i1 %i11, label %bb13, label %bb12 bb12: ; preds = %bb6 - call void @llvm.lifetime.end.p0(i64 4, ptr %i4) #3 + call void @llvm.lifetime.end.p0(ptr %i4) #3 br label %bb25 bb13: ; preds = %bb6 - call void @llvm.lifetime.start.p0(i64 8, ptr %i5) #3 + call void @llvm.lifetime.start.p0(ptr %i5) #3 %i14 = load i32, ptr %i4, align 4, !tbaa !5 %i15 = srem i32 %i14, 2 %i16 = sext i32 %i15 to i64 @@ -90,7 +90,7 @@ bb13: ; preds = %bb6 %i20 = load i32, ptr %i19, align 4, !tbaa !5 %i21 = mul nsw i32 %i20, %i18 store i32 %i21, ptr %i19, align 4, !tbaa !5 - call void @llvm.lifetime.end.p0(i64 8, ptr %i5) #3 + call void @llvm.lifetime.end.p0(ptr %i5) #3 br label %bb22 bb22: ; preds = %bb13 @@ -102,12 +102,12 @@ bb22: ; preds = %bb13 bb25: ; preds = %bb12 %i26 = load ptr, ptr %i2, align 8, !tbaa !9 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %i26, ptr align 4 %i3, i64 8, i1 false), !tbaa.struct !13 - call void @llvm.lifetime.end.p0(i64 8, ptr %i3) #3 + call void @llvm.lifetime.end.p0(ptr %i3) #3 ret void } ; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 ; Function Attrs: argmemonly nocallback nofree nounwind willreturn declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2 @@ -126,7 +126,7 @@ bb: } ; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 ; Function Attrs: mustprogress nounwind uwtable define linkonce_odr dso_local noundef nonnull align 4 dereferenceable(4) ptr @widget(ptr noundef nonnull align 4 dereferenceable(8) %arg, i64 noundef %arg1) #0 comdat($_ZNSt14__array_traitsIiLm2EE6_S_refERA2_Kim) align 2 { diff --git a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll index c6dc7b3..51f2a36 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll @@ -50,14 +50,14 @@ entry: %__end15 = alloca ptr %elt11 = alloca ptr store i32 %cnt, ptr %cnt.addr - call void @llvm.lifetime.start.p0(i64 24, ptr %arr) - call void @llvm.lifetime.start.p0(i64 8, ptr %__range1) + call void @llvm.lifetime.start.p0(ptr %arr) + call void @llvm.lifetime.start.p0(ptr %__range1) store ptr %arr, ptr %__range1 - call void @llvm.lifetime.start.p0(i64 8, ptr %__begin1) + call void @llvm.lifetime.start.p0(ptr %__begin1) %0 = load ptr, ptr %__range1 %call = call ptr @_ZNSt5arrayIiLm6EE5beginEv(ptr %0) store ptr %call, ptr %__begin1 - call void @llvm.lifetime.start.p0(i64 8, ptr %__end1) + call void @llvm.lifetime.start.p0(ptr %__end1) %1 = load ptr, ptr %__range1 %call1 = call ptr @_ZNSt5arrayIiLm6EE3endEv(ptr %1) store ptr %call1, ptr %__end1 @@ -70,13 +70,13 @@ for.cond: br i1 %cmp, label %for.body, label %for.cond.cleanup for.cond.cleanup: - call void @llvm.lifetime.end.p0(i64 8, ptr %__end1) - call void @llvm.lifetime.end.p0(i64 8, ptr %__begin1) - call void 
@llvm.lifetime.end.p0(i64 8, ptr %__range1) + call void @llvm.lifetime.end.p0(ptr %__end1) + call void @llvm.lifetime.end.p0(ptr %__begin1) + call void @llvm.lifetime.end.p0(ptr %__range1) br label %for.end for.body: - call void @llvm.lifetime.start.p0(i64 8, ptr %elt) + call void @llvm.lifetime.start.p0(ptr %elt) %4 = load ptr, ptr %__begin1 store ptr %4, ptr %elt %5 = load i32, ptr %cnt.addr @@ -84,7 +84,7 @@ for.body: store i32 %inc, ptr %cnt.addr %6 = load ptr, ptr %elt store i32 %inc, ptr %6 - call void @llvm.lifetime.end.p0(i64 8, ptr %elt) + call void @llvm.lifetime.end.p0(ptr %elt) br label %for.inc for.inc: @@ -94,13 +94,13 @@ for.inc: br label %for.cond for.end: - call void @llvm.lifetime.start.p0(i64 8, ptr %__range12) + call void @llvm.lifetime.start.p0(ptr %__range12) store ptr %arr, ptr %__range12 - call void @llvm.lifetime.start.p0(i64 8, ptr %__begin13) + call void @llvm.lifetime.start.p0(ptr %__begin13) %8 = load ptr, ptr %__range12 %call4 = call ptr @_ZNSt5arrayIiLm6EE5beginEv(ptr %8) store ptr %call4, ptr %__begin13 - call void @llvm.lifetime.start.p0(i64 8, ptr %__end15) + call void @llvm.lifetime.start.p0(ptr %__end15) %9 = load ptr, ptr %__range12 %call6 = call ptr @_ZNSt5arrayIiLm6EE3endEv(ptr %9) store ptr %call6, ptr %__end15 @@ -113,19 +113,19 @@ for.cond7: br i1 %cmp8, label %for.body10, label %for.cond.cleanup9 for.cond.cleanup9: - call void @llvm.lifetime.end.p0(i64 8, ptr %__end15) - call void @llvm.lifetime.end.p0(i64 8, ptr %__begin13) - call void @llvm.lifetime.end.p0(i64 8, ptr %__range12) + call void @llvm.lifetime.end.p0(ptr %__end15) + call void @llvm.lifetime.end.p0(ptr %__begin13) + call void @llvm.lifetime.end.p0(ptr %__range12) br label %for.end14 for.body10: - call void @llvm.lifetime.start.p0(i64 8, ptr %elt11) + call void @llvm.lifetime.start.p0(ptr %elt11) %12 = load ptr, ptr %__begin13 store ptr %12, ptr %elt11 %13 = load ptr, ptr %elt11 %14 = load i32, ptr %13 call void @_Z3usei(i32 %14) - call void @llvm.lifetime.end.p0(i64 8, ptr %elt11) + call void @llvm.lifetime.end.p0(ptr %elt11) br label %for.inc12 for.inc12: @@ -135,11 +135,11 @@ for.inc12: br label %for.cond7 for.end14: - call void @llvm.lifetime.end.p0(i64 24, ptr %arr) + call void @llvm.lifetime.end.p0(ptr %arr) ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) define linkonce_odr dso_local ptr @_ZNSt5arrayIiLm6EE5beginEv(ptr %this) { entry: @@ -160,7 +160,7 @@ entry: ret ptr %add.ptr } -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare dso_local void @_Z3usei(i32) diff --git a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll index dfad534..00453e7 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll @@ -143,7 +143,7 @@ entry: %j = alloca i64, align 8 store ptr %data, ptr %data.addr, align 8, !tbaa !3 store i64 %numElems, ptr %numElems.addr, align 8, !tbaa !7 - call void @llvm.lifetime.start.p0(i64 8, ptr %i) + call void @llvm.lifetime.start.p0(ptr %i) store i64 0, ptr %i, align 8, !tbaa !7 br label %for.cond @@ -154,11 +154,11 @@ for.cond: for.cond.cleanup: store i32 2, ptr %cleanup.dest.slot, align 4 - call void @llvm.lifetime.end.p0(i64 8, ptr %i) + call void @llvm.lifetime.end.p0(ptr %i) br label %for.end8 for.body: - call void @llvm.lifetime.start.p0(i64 8, 
ptr %j) + call void @llvm.lifetime.start.p0(ptr %j) store i64 0, ptr %j, align 8, !tbaa !7 br label %for.cond1 @@ -170,7 +170,7 @@ for.cond1: for.cond.cleanup3: store i32 5, ptr %cleanup.dest.slot, align 4 - call void @llvm.lifetime.end.p0(i64 8, ptr %j) + call void @llvm.lifetime.end.p0(ptr %j) br label %for.end for.body4: @@ -201,7 +201,7 @@ for.end8: ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) define linkonce_odr dso_local noundef nonnull align 4 dereferenceable(4) ptr @_ZNSt6vectorIiSaIiEEixEm(ptr noundef nonnull align 8 dereferenceable(24) %this, i64 noundef %__n) comdat align 2 { entry: @@ -217,7 +217,7 @@ entry: ret ptr %add.ptr } -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) !0 = !{i32 1, !"wchar_size", i32 4} !1 = !{i32 7, !"uwtable", i32 2} diff --git a/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll b/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll index 1a4af5a..3c54ed9 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll @@ -63,10 +63,10 @@ for.end: ; preds = %for.cond.cleanup } ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87"} ;. 
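Note: the dominant pattern in this patch is a single mechanical rewrite — the explicit i64 size operand is dropped from the llvm.lifetime.start/llvm.lifetime.end intrinsics, leaving only the pointer to the alloca. A minimal sketch of the updated form (the function @sketch is hypothetical and not taken from any test in this diff; the declarations match those added by the patch):

define void @sketch() {
entry:
  %tmp = alloca i32, align 4
  ; was: call void @llvm.lifetime.start.p0(i64 4, ptr %tmp)
  call void @llvm.lifetime.start.p0(ptr %tmp)
  store i32 0, ptr %tmp, align 4
  ; was: call void @llvm.lifetime.end.p0(i64 4, ptr %tmp)
  call void @llvm.lifetime.end.p0(ptr %tmp)
  ret void
}

declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)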
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/masked-memory-ops-with-cf.ll b/llvm/test/Transforms/PhaseOrdering/X86/masked-memory-ops-with-cf.ll index 405a26d..c649f29e 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/masked-memory-ops-with-cf.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/masked-memory-ops-with-cf.ll @@ -13,7 +13,7 @@ define void @basic(i1 %cond, ptr %b, ptr %p, ptr %q) { ; CHECK-NEXT: [[TMP5:%.*]] = call <1 x i64> @llvm.masked.load.v1i64.p0(ptr [[B:%.*]], i32 8, <1 x i1> [[TMP0]], <1 x i64> poison) ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP5]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = bitcast i16 [[TMP2]] to <1 x i16> -; CHECK-NEXT: call void @llvm.masked.store.v1i16.p0(<1 x i16> [[TMP7]], ptr [[B]], i32 2, <1 x i1> [[TMP0]]) +; CHECK-NEXT: call void @llvm.masked.store.v1i16.p0(<1 x i16> [[TMP7]], ptr [[B]], i32 8, <1 x i1> [[TMP0]]) ; CHECK-NEXT: [[TMP8:%.*]] = bitcast i32 [[TMP4]] to <1 x i32> ; CHECK-NEXT: call void @llvm.masked.store.v1i32.p0(<1 x i32> [[TMP8]], ptr [[P]], i32 4, <1 x i1> [[TMP0]]) ; CHECK-NEXT: [[TMP9:%.*]] = bitcast i64 [[TMP6]] to <1 x i64> diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll index 362708b..0c58705 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll @@ -13,7 +13,7 @@ define <2 x i64> @PR61061(<2 x i64> noundef %vect) { ; CHECK-NEXT: ret <2 x i64> [[TMP1]] ; %ptr = alloca <2 x i64>, align 16 - call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %ptr) + call void @llvm.lifetime.start.p0(ptr nonnull %ptr) %bc0 = bitcast <2 x i64> %vect to <16 x i8> %bc1 = bitcast <2 x i64> %vect to <16 x i8> %bc2 = bitcast <2 x i64> %vect to <16 x i8> @@ -62,8 +62,8 @@ define <2 x i64> @PR61061(<2 x i64> noundef %vect) { store i8 %elt2, ptr %ptr14, align 2 store i8 %elt3, ptr %ptr15, align 1 %base = load <2 x i64>, ptr %ptr, align 16 - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %ptr) + call void @llvm.lifetime.end.p0(ptr nonnull %ptr) ret <2 x i64> %base } -declare void @llvm.lifetime.start.p0(i64, ptr) -declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.start.p0(ptr) +declare void @llvm.lifetime.end.p0(ptr) diff --git a/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll b/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll index be7f4c2..cb37846 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll @@ -79,9 +79,9 @@ entry: store ptr %face_cell, ptr %face_cell.addr, align 8, !tbaa !10 store ptr %x, ptr %x.addr, align 8, !tbaa !10 store ptr %y, ptr %y.addr, align 8, !tbaa !10 - call void @llvm.lifetime.start.p0(i64 4, ptr %il) #3 - call void @llvm.lifetime.start.p0(i64 4, ptr %ir) #3 - call void @llvm.lifetime.start.p0(i64 4, ptr %iface) #3 + call void @llvm.lifetime.start.p0(ptr %il) #3 + call void @llvm.lifetime.start.p0(ptr %ir) #3 + call void @llvm.lifetime.start.p0(ptr %iface) #3 store i32 0, ptr %iface, align 4, !tbaa !6 br label %for.cond @@ -92,7 +92,7 @@ for.cond: br i1 %cmp, label %for.body, label %for.cond.cleanup for.cond.cleanup: - call void @llvm.lifetime.end.p0(i64 4, ptr %iface) #3, !llvm.access.group !12 + call void @llvm.lifetime.end.p0(ptr %iface) #3, !llvm.access.group !12 br label %for.end for.body: @@ -134,12 +134,12 @@ for.inc: br label %for.cond, !llvm.loop !15 for.end: - call void @llvm.lifetime.end.p0(i64 4, ptr %ir) #3 - call void 
@llvm.lifetime.end.p0(i64 4, ptr %il) #3 + call void @llvm.lifetime.end.p0(ptr %ir) #3 + call void @llvm.lifetime.end.p0(ptr %il) #3 ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 define linkonce_odr noundef nonnull align 8 dereferenceable(8) ptr @max(ptr noundef nonnull align 8 dereferenceable(8) %__a, ptr noundef nonnull align 8 dereferenceable(8) %__b) #2 { entry: @@ -170,7 +170,7 @@ return: ret ptr %6 } -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { mustprogress "target-cpu" = "skylake-avx512" } attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) } diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll b/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll index f60bc26..69a46b2 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll @@ -39,7 +39,7 @@ entry: %i = alloca i32, align 4 store ptr %a, ptr %a.addr, align 8, !tbaa !3 store float %b, ptr %b.addr, align 4, !tbaa !7 - call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2 + call void @llvm.lifetime.start.p0(ptr %i) #2 store i32 0, ptr %i, align 4, !tbaa !9 br label %for.cond @@ -49,7 +49,7 @@ for.cond: ; preds = %for.inc, %entry br i1 %cmp, label %for.body, label %for.cond.cleanup for.cond.cleanup: ; preds = %for.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2 + call void @llvm.lifetime.end.p0(ptr %i) #2 br label %for.end for.body: ; preds = %for.cond @@ -73,8 +73,8 @@ for.end: ; preds = %for.cond.cleanup ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { nounwind ssp uwtable "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="true" } attributes #1 = { argmemonly nofree nosync nounwind willreturn } diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll b/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll index 85f6fce..f6e8fcd 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll @@ -75,7 +75,7 @@ entry: %ref.tmp7 = alloca %union.ElementWiseAccess, align 16 %ref.tmp12 = alloca %union.ElementWiseAccess, align 16 store ptr %V, ptr %V.addr, align 8 - call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp) #4 + call void @llvm.lifetime.start.p0(ptr %ref.tmp) #4 %0 = load ptr, ptr %V.addr, align 8 %call = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %0) %coerce.dive = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp, i32 0, i32 0 @@ -87,7 +87,7 @@ entry: store double %4, ptr %3, align 8 %call1 = call noundef float @ElementWiseAccess5getAt(ptr noundef nonnull align 16 dereferenceable(16) %ref.tmp, i32 noundef 0) %vecinit = insertelement <4 x float> undef, float %call1, i32 0 - call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp2) #4 + call void 
@llvm.lifetime.start.p0(ptr %ref.tmp2) #4 %5 = load ptr, ptr %V.addr, align 8 %call3 = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %5) %coerce.dive4 = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp2, i32 0, i32 0 @@ -99,7 +99,7 @@ entry: store double %9, ptr %8, align 8 %call5 = call noundef float @ElementWiseAccess5getAt(ptr noundef nonnull align 16 dereferenceable(16) %ref.tmp2, i32 noundef 1) %vecinit6 = insertelement <4 x float> %vecinit, float %call5, i32 1 - call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp7) #4 + call void @llvm.lifetime.start.p0(ptr %ref.tmp7) #4 %10 = load ptr, ptr %V.addr, align 8 %call8 = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %10) %coerce.dive9 = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp7, i32 0, i32 0 @@ -111,7 +111,7 @@ entry: store double %14, ptr %13, align 8 %call10 = call noundef float @ElementWiseAccess5getAt(ptr noundef nonnull align 16 dereferenceable(16) %ref.tmp7, i32 noundef 2) %vecinit11 = insertelement <4 x float> %vecinit6, float %call10, i32 2 - call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp12) #4 + call void @llvm.lifetime.start.p0(ptr %ref.tmp12) #4 %15 = load ptr, ptr %V.addr, align 8 %call13 = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %15) %coerce.dive14 = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp12, i32 0, i32 0 @@ -125,10 +125,10 @@ entry: %vecinit16 = insertelement <4 x float> %vecinit11, float %call15, i32 3 store <4 x float> %vecinit16, ptr %.compoundliteral, align 16 %20 = load <4 x float>, ptr %.compoundliteral, align 16 - call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp12) #4 - call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp7) #4 - call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp2) #4 - call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp) #4 + call void @llvm.lifetime.end.p0(ptr %ref.tmp12) #4 + call void @llvm.lifetime.end.p0(ptr %ref.tmp7) #4 + call void @llvm.lifetime.end.p0(ptr %ref.tmp2) #4 + call void @llvm.lifetime.end.p0(ptr %ref.tmp) #4 ret <4 x float> %20 } @@ -144,8 +144,8 @@ entry: ret { double, double } %1 } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #3 define internal noundef nonnull align 16 dereferenceable(16) ptr @castToElementWiseAccess_ByRef(ptr noundef nonnull align 16 dereferenceable(16) %0) #1 { diff --git a/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll b/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll index 4d0f039..dd9ead4 100644 --- a/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll +++ b/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll @@ -130,8 +130,8 @@ for.end34: ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { nounwind ssp uwtable "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" 
"target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" } attributes #1 = { argmemonly nofree nosync nounwind willreturn } diff --git a/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll b/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll index 1297dbe..9a6cad4 100644 --- a/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll +++ b/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll @@ -1,9 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 ; RUN: opt -passes='function(dse),cgscc(inline),function(sroa,gvn,sccp)' -S %s | FileCheck %s -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @llvm.assume(i1 noundef) @@ -35,10 +35,10 @@ define i32 @test() { ; entry: %a = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a) + call void @llvm.lifetime.start.p0(ptr nonnull %a) store i32 1, ptr %a, align 4 %res = call i1 @check_cond(ptr %a) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %a) + call void @llvm.lifetime.end.p0(ptr nonnull %a) call void @llvm.assume(i1 %res) ret i32 0 } diff --git a/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll b/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll index ae98fe6..c6d1cbd 100644 --- a/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll +++ b/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll @@ -35,7 +35,7 @@ entry: %i = alloca i32, align 4 store ptr %a, ptr %a.addr, align 8 store i32 %beam, ptr %beam.addr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %i) + call void @llvm.lifetime.start.p0(ptr %i) store i32 0, ptr %i, align 4 br label %for.cond @@ -45,7 +45,7 @@ for.cond: ; preds = %for.inc, %entry br i1 %cmp, label %for.body, label %for.cond.cleanup for.cond.cleanup: ; preds = %for.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %i) + call void @llvm.lifetime.end.p0(ptr %i) br label %for.end for.body: ; preds = %for.cond @@ -85,6 +85,6 @@ for.end: ; preds = %for.cond.cleanup ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll b/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll index cc20233a..84cbad3 100644 --- a/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll +++ b/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll @@ -73,23 +73,23 @@ bb: %i = alloca %0, align 8 %i1 = alloca %0, align 8 %i2 = alloca %0, align 8 - call void @llvm.lifetime.start.p0(i64 24, ptr %i) + call void @llvm.lifetime.start.p0(ptr %i) call void @llvm.memcpy.p0.p0.i64(ptr align 8 %i1, ptr align 8 %arg, i64 24, i1 false) call void @_Z3gen1S(ptr sret(%0) align 8 %i, ptr byval(%0) align 8 %i1) call void @llvm.memcpy.p0.p0.i64(ptr align 8 %i2, ptr align 8 %i, i64 24, i1 false) call void @_Z7escape01S(ptr byval(%0) align 8 %i2) %i9 = load ptr, ptr %i, align 8 - call void @llvm.lifetime.end.p0(i64 24, ptr %i) + call void @llvm.lifetime.end.p0(ptr %i) ret ptr %i9 } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void 
@llvm.lifetime.start.p0(ptr nocapture) declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) declare dso_local void @_Z7escape01S(ptr byval(%0) align 8) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define dso_local ptr @_Z3bar1S(ptr byval(%0) align 8 %arg) { ; CHECK-LABEL: @_Z3bar1S( @@ -112,7 +112,7 @@ define dso_local ptr @_Z3bar1S(ptr byval(%0) align 8 %arg) { bb: %i = alloca %0, align 8 %i1 = alloca %0, align 8 - call void @llvm.lifetime.start.p0(i64 24, ptr %i) + call void @llvm.lifetime.start.p0(ptr %i) call void @llvm.memcpy.p0.p0.i64(ptr align 8 %i1, ptr align 8 %arg, i64 24, i1 false) call void @_Z3gen1S(ptr sret(%0) align 8 %i, ptr byval(%0) align 8 %i1) %i5 = call i32 @_Z4condv() @@ -133,7 +133,7 @@ bb10: bb13: %i15 = load ptr, ptr %i, align 8 - call void @llvm.lifetime.end.p0(i64 24, ptr %i) + call void @llvm.lifetime.end.p0(ptr %i) ret ptr %i15 } diff --git a/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll b/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll index 1239b18..c5dbc42 100644 --- a/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll +++ b/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll @@ -7,8 +7,8 @@ ; RUN: opt < %s -passes='default<O2>' -S | FileCheck %s --check-prefixes=CHECK,OPT ; RUN: opt < %s -passes="default<O3>" -S | FileCheck %s --check-prefixes=CHECK,OPT -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @foo(ptr nocapture) define void @asan() sanitize_address { @@ -16,8 +16,8 @@ entry: ; CHECK-LABEL: @asan( %text = alloca i8, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr %text) - call void @llvm.lifetime.end.p0(i64 1, ptr %text) + call void @llvm.lifetime.start.p0(ptr %text) + call void @llvm.lifetime.end.p0(ptr %text) ; CHECK: call void @llvm.lifetime.start ; CHECK-NEXT: call void @llvm.lifetime.end @@ -31,8 +31,8 @@ entry: ; CHECK-LABEL: @hwasan( %text = alloca i8, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr %text) - call void @llvm.lifetime.end.p0(i64 1, ptr %text) + call void @llvm.lifetime.start.p0(ptr %text) + call void @llvm.lifetime.end.p0(ptr %text) ; CHECK: call void @llvm.lifetime.start ; CHECK-NEXT: call void @llvm.lifetime.end @@ -46,8 +46,8 @@ entry: ; CHECK-LABEL: @msan( %text = alloca i8, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr %text) - call void @llvm.lifetime.end.p0(i64 1, ptr %text) + call void @llvm.lifetime.start.p0(ptr %text) + call void @llvm.lifetime.end.p0(ptr %text) ; CHECK: call void @llvm.lifetime.start ; CHECK-NEXT: call void @llvm.lifetime.end @@ -61,8 +61,8 @@ entry: ; CHECK-LABEL: @no_asan( %text = alloca i8, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr %text) - call void @llvm.lifetime.end.p0(i64 1, ptr %text) + call void @llvm.lifetime.start.p0(ptr %text) + call void @llvm.lifetime.end.p0(ptr %text) ; OPT-NOT: call void @llvm.lifetime ; NOOPT: call void @llvm.lifetime.start ; NOOPT-NEXT: call void @llvm.lifetime.end diff --git a/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll b/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll index 45f18dd..ae0e591 100644 --- a/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll +++ b/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll @@ -52,7 +52,7 @@ entry: 
%elems.coerce.fca.1.extract = extractvalue [2 x i64] %elems.coerce, 1 %elems.coerce.fca.1.gep = getelementptr inbounds [2 x i64], ptr %elems, i64 0, i64 1 store i64 %elems.coerce.fca.1.extract, ptr %elems.coerce.fca.1.gep, align 8 - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %__begin1) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %__begin1) #6 %0 = load ptr, ptr %elems, align 8 %__size_.i.i = getelementptr inbounds %"class.std::__1::span", ptr %elems, i64 0, i32 1 %1 = load i64, ptr %__size_.i.i, align 8 @@ -66,7 +66,7 @@ entry: br i1 %cmp.not.i.i.i.i, label %error, label %check.2 check.2: - call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %__end1) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %__end1) #6 %l4 = load ptr, ptr %elems, align 8 %__size_.i.i4 = getelementptr inbounds %"class.std::__1::span", ptr %elems, i64 0, i32 1 %l5 = load i64, ptr %__size_.i.i4, align 8 @@ -90,8 +90,8 @@ for.cond: br i1 %cmp.i, label %for.body, label %for.cond.cleanup for.cond.cleanup: ; preds = %for.cond - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %__end1) - call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %__begin1) + call void @llvm.lifetime.end.p0(ptr nonnull %__end1) + call void @llvm.lifetime.end.p0(ptr nonnull %__begin1) ret void for.body: ; preds = %for.cond @@ -115,11 +115,11 @@ for.latch: declare void @error() -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @use(ptr noundef nonnull align 4 dereferenceable(4)) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) ; ------------------------------------------------------------------------- @@ -160,11 +160,11 @@ entry: %count = alloca i64, align 8 %i = alloca i64, align 8 store ptr %vec, ptr %vec.addr, align 8 - call void @llvm.lifetime.start.p0(i64 8, ptr %count) + call void @llvm.lifetime.start.p0(ptr %count) %0 = load ptr, ptr %vec.addr, align 8 %call = call noundef i64 @alloc(ptr noundef nonnull align 8 dereferenceable(24) %0) store i64 %call, ptr %count, align 8 - call void @llvm.lifetime.start.p0(i64 8, ptr %i) + call void @llvm.lifetime.start.p0(ptr %i) store i64 0, ptr %i, align 8 br label %for.cond @@ -175,7 +175,7 @@ for.cond: br i1 %cmp, label %for.body, label %for.cond.cleanup for.cond.cleanup: - call void @llvm.lifetime.end.p0(i64 8, ptr %i) + call void @llvm.lifetime.end.p0(ptr %i) br label %for.end for.body: @@ -194,7 +194,7 @@ for.inc: br label %for.cond for.end: - call void @llvm.lifetime.end.p0(i64 8, ptr %count) #5 + call void @llvm.lifetime.end.p0(ptr %count) #5 ret void } @@ -299,11 +299,11 @@ entry: %count = alloca i64, align 8 %i = alloca i64, align 8 store ptr %vec, ptr %vec.addr, align 8 - call void @llvm.lifetime.start.p0(i64 8, ptr %count) + call void @llvm.lifetime.start.p0(ptr %count) %0 = load ptr, ptr %vec.addr, align 8 %call = call noundef i64 @alloc(ptr noundef nonnull align 8 dereferenceable(24) %0) store i64 %call, ptr %count, align 8 - call void @llvm.lifetime.start.p0(i64 8, ptr %i) + call void @llvm.lifetime.start.p0(ptr %i) store i64 0, ptr %i, align 8 br label %for.cond @@ -314,7 +314,7 @@ for.cond: br i1 %cmp, label %for.body, label %for.cond.cleanup for.cond.cleanup: - call void @llvm.lifetime.end.p0(i64 8, ptr %i) + call void @llvm.lifetime.end.p0(ptr %i) br label %for.end for.body: @@ -333,7 +333,7 @@ for.inc: br label %for.cond for.end: - call void @llvm.lifetime.end.p0(i64 8, ptr %count) + call void @llvm.lifetime.end.p0(ptr 
%count) ret void } @@ -376,7 +376,7 @@ entry: %k = alloca i32, align 4 store ptr %arr, ptr %arr.addr, align 8 store i32 %len, ptr %len.addr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %i) #3 + call void @llvm.lifetime.start.p0(ptr %i) #3 store i32 1, ptr %i, align 4 br label %for.cond @@ -388,11 +388,11 @@ for.cond: ; preds = %for.inc5, %entry for.cond.cleanup: ; preds = %for.cond store i32 2, ptr %cleanup.dest.slot, align 4 - call void @llvm.lifetime.end.p0(i64 4, ptr %i) #3 + call void @llvm.lifetime.end.p0(ptr %i) #3 br label %for.end6 for.body: ; preds = %for.cond - call void @llvm.lifetime.start.p0(i64 4, ptr %k) #3 + call void @llvm.lifetime.start.p0(ptr %k) #3 %2 = load i32, ptr %i, align 4 store i32 %2, ptr %k, align 4 br label %for.cond1 @@ -404,7 +404,7 @@ for.cond1: ; preds = %for.inc, %for.body for.cond.cleanup3: ; preds = %for.cond1 store i32 5, ptr %cleanup.dest.slot, align 4 - call void @llvm.lifetime.end.p0(i64 4, ptr %k) #3 + call void @llvm.lifetime.end.p0(ptr %k) #3 br label %for.end for.body4: ; preds = %for.cond1 diff --git a/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll b/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll index c6b5e5f..5ff57ea 100644 --- a/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll +++ b/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll @@ -38,8 +38,8 @@ declare void @f0() declare void @f1() declare void @f2() -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define void @_Z4loopi(i32 %width) { ; HOIST-LABEL: @_Z4loopi( @@ -100,7 +100,7 @@ if.then: br label %return if.end: - call void @llvm.lifetime.start.p0(i64 4, ptr %i) + call void @llvm.lifetime.start.p0(ptr %i) store i32 0, ptr %i, align 4 br label %for.cond @@ -112,7 +112,7 @@ for.cond: br i1 %cmp1, label %for.body, label %for.cond.cleanup for.cond.cleanup: - call void @llvm.lifetime.end.p0(i64 4, ptr %i) + call void @llvm.lifetime.end.p0(ptr %i) br label %for.end for.body: diff --git a/llvm/test/Transforms/PhaseOrdering/vector-select.ll b/llvm/test/Transforms/PhaseOrdering/vector-select.ll index 1bdd135..c228723 100644 --- a/llvm/test/Transforms/PhaseOrdering/vector-select.ll +++ b/llvm/test/Transforms/PhaseOrdering/vector-select.ll @@ -19,9 +19,9 @@ define <3 x float> @PR52631(<3 x float> %a, <3 x float> %b, <3 x i32> %c) { store <4 x float> %extractVec1, ptr %b.addr, align 16 %extractVec3 = shufflevector <3 x i32> %c, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef> store <4 x i32> %extractVec3, ptr %c.addr, align 16 - call void @llvm.lifetime.start.p0(i64 16, ptr %zero) #2 + call void @llvm.lifetime.start.p0(ptr %zero) #2 store <4 x i32> <i32 0, i32 0, i32 0, i32 undef>, ptr %zero, align 16 - call void @llvm.lifetime.start.p0(i64 16, ptr %mask) #2 + call void @llvm.lifetime.start.p0(ptr %mask) #2 %loadVec4 = load <4 x i32>, ptr %zero, align 16 %extractVec6 = shufflevector <4 x i32> %loadVec4, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2> %loadVec48 = load <4 x i32>, ptr %c.addr, align 16 @@ -30,7 +30,7 @@ define <3 x float> @PR52631(<3 x float> %a, <3 x float> %b, <3 x i32> %c) { %sext = sext <3 x i1> %cmp to <3 x i32> %extractVec10 = shufflevector <3 x i32> %sext, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef> store <4 x i32> %extractVec10, ptr %mask, align 
16 - call void @llvm.lifetime.start.p0(i64 16, ptr %res) #2 + call void @llvm.lifetime.start.p0(ptr %res) #2 %loadVec413 = load <4 x i32>, ptr %mask, align 16 %extractVec14 = shufflevector <4 x i32> %loadVec413, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2> %loadVec416 = load <4 x float>, ptr %b.addr, align 16 @@ -51,9 +51,9 @@ define <3 x float> @PR52631(<3 x float> %a, <3 x float> %b, <3 x i32> %c) { %extractVec32 = shufflevector <4 x i32> %loadVec431, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2> %or = or <3 x i32> %and29, %extractVec32 %astype33 = bitcast <3 x i32> %or to <3 x float> - call void @llvm.lifetime.end.p0(i64 16, ptr %res) #2 - call void @llvm.lifetime.end.p0(i64 16, ptr %mask) #2 - call void @llvm.lifetime.end.p0(i64 16, ptr %zero) #2 + call void @llvm.lifetime.end.p0(ptr %res) #2 + call void @llvm.lifetime.end.p0(ptr %mask) #2 + call void @llvm.lifetime.end.p0(ptr %zero) #2 ret <3 x float> %astype33 } @@ -112,5 +112,5 @@ for.end: ret <4 x i32> %min.addr.0 } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 diff --git a/llvm/test/Transforms/SROA/alloca-address-space.ll b/llvm/test/Transforms/SROA/alloca-address-space.ll index 31305c8..941178f 100644 --- a/llvm/test/Transforms/SROA/alloca-address-space.ll +++ b/llvm/test/Transforms/SROA/alloca-address-space.ll @@ -140,7 +140,7 @@ define void @addressspace_alloca_lifetime() { ; CHECK-NEXT: ret void ; %alloca = alloca i8, align 8, addrspace(2) - call void @llvm.lifetime.start(i64 2, ptr addrspace(2) %alloca) + call void @llvm.lifetime.start(ptr addrspace(2) %alloca) ret void } diff --git a/llvm/test/Transforms/SROA/basictest.ll b/llvm/test/Transforms/SROA/basictest.ll index 3034aaa..15803f7 100644 --- a/llvm/test/Transforms/SROA/basictest.ll +++ b/llvm/test/Transforms/SROA/basictest.ll @@ -4,8 +4,8 @@ target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64" -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define i32 @test0() { ; CHECK-LABEL: @test0( @@ -18,21 +18,21 @@ entry: %a1 = alloca i32 %a2 = alloca float - call void @llvm.lifetime.start.p0(i64 4, ptr %a1) + call void @llvm.lifetime.start.p0(ptr %a1) store i32 0, ptr %a1 %v1 = load i32, ptr %a1 - call void @llvm.lifetime.end.p0(i64 4, ptr %a1) + call void @llvm.lifetime.end.p0(ptr %a1) - call void @llvm.lifetime.start.p0(i64 4, ptr %a2) + call void @llvm.lifetime.start.p0(ptr %a2) store float 0.0, ptr %a2 %v2 = load float , ptr %a2 %v2.int = bitcast float %v2 to i32 %sum1 = add i32 %v1, %v2.int - call void @llvm.lifetime.end.p0(i64 4, ptr %a2) + call void @llvm.lifetime.end.p0(ptr %a2) ret i32 %sum1 } @@ -1102,7 +1102,7 @@ define void @PR14059.1(ptr %d) { ; entry: %X.sroa.0.i = alloca double, align 8 - call void @llvm.lifetime.start.p0(i64 -1, ptr %X.sroa.0.i) + call void @llvm.lifetime.start.p0(ptr %X.sroa.0.i) ; Store to the low 32-bits... 
 ; Store to the low 32-bits...
 store i32 0, ptr %X.sroa.0.i, align 8
@@ -1126,7 +1126,7 @@ entry:
 %accum.real.i = load double, ptr %d, align 8
 %add.r.i = fadd double %accum.real.i, %X.sroa.0.0.load1.i
 store double %add.r.i, ptr %d, align 8
- call void @llvm.lifetime.end.p0(i64 -1, ptr %X.sroa.0.i)
+ call void @llvm.lifetime.end.p0(ptr %X.sroa.0.i)
 ret void
 }
@@ -1812,7 +1812,7 @@ define void @PR25873(ptr %outData) {
 ;
 entry:
 %tmpData = alloca %struct.STest, align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %tmpData)
+ call void @llvm.lifetime.start.p0(ptr %tmpData)
 store float 1.230000e+02, ptr %tmpData, align 8
 %y = getelementptr inbounds %struct.STest, ptr %tmpData, i64 0, i32 0, i32 1
 store float 4.560000e+02, ptr %y, align 4
@@ -1820,7 +1820,7 @@ entry:
 %0 = load i64, ptr %tmpData, align 8
 store i64 %0, ptr %m_posB, align 8
 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %outData, ptr align 4 %tmpData, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %tmpData)
+ call void @llvm.lifetime.end.p0(ptr %tmpData)
 ret void
 }
@@ -1833,8 +1833,8 @@ define void @PR27999() unnamed_addr {
 ;
 entry-block:
 %0 = alloca [2 x i64], align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %0)
- call void @llvm.lifetime.end.p0(i64 8, ptr %0)
+ call void @llvm.lifetime.start.p0(ptr %0)
+ call void @llvm.lifetime.end.p0(ptr %0)
 ret void
 }
@@ -1846,7 +1846,7 @@ define void @PR29139() {
 bb1:
 %e.7.sroa.6.i = alloca i32, align 1
 %e.7.sroa.6.0.load81.i = load i32, ptr %e.7.sroa.6.i, align 1
- call void @llvm.lifetime.end.p0(i64 2, ptr %e.7.sroa.6.i)
+ call void @llvm.lifetime.end.p0(ptr %e.7.sroa.6.i)
 ret void
 }
@@ -1898,8 +1898,8 @@ entry:
 ret void
 }
-declare void @llvm.lifetime.start.isVoid.i64.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.isVoid.i64.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.isVoid.i64.p0(ptr nocapture)
+declare void @llvm.lifetime.end.isVoid.i64.p0(ptr nocapture)
 @array = dso_local global [10 x float] zeroinitializer, align 4
 define void @test29(i32 %num, i32 %tid) {
@@ -1931,7 +1931,7 @@ define void @test29(i32 %num, i32 %tid) {
 ;
 entry:
 %ra = alloca [10 x float], align 4
- call void @llvm.lifetime.start.isVoid.i64.p0(i64 40, ptr nonnull %ra)
+ call void @llvm.lifetime.start.isVoid.i64.p0(ptr nonnull %ra)
 %cmp1 = icmp sgt i32 %num, 0
 br i1 %cmp1, label %bb1, label %bb7
@@ -1963,7 +1963,7 @@ bb6:
 br label %bb7
 bb7:
- call void @llvm.lifetime.end.isVoid.i64.p0(i64 40, ptr nonnull %ra)
+ call void @llvm.lifetime.end.isVoid.i64.p0(ptr nonnull %ra)
 ret void
 }
diff --git a/llvm/test/Transforms/SROA/dead-inst.ll b/llvm/test/Transforms/SROA/dead-inst.ll
index 44ae821..bf47722 100644
--- a/llvm/test/Transforms/SROA/dead-inst.ll
+++ b/llvm/test/Transforms/SROA/dead-inst.ll
@@ -47,7 +47,7 @@ define void @H(ptr noalias nocapture readnone, [2 x i64], ptr %ptr, i32 signext
 ; CHECK-NEXT: [[TMP21:%.*]] = phi i64 [ -1, [[TMP12]] ], [ [[TMP20]], [[TMP17]] ]
 ; CHECK-NEXT: [[TMP22:%.*]] = inttoptr i64 0 to ptr
 ; CHECK-NEXT: [[TMP23:%.*]] = sub nsw i64 [[TMP21]], [[TMP13]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[TMP3]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP3]])
 ; CHECK-NEXT: [[TMP24:%.*]] = icmp ult i64 [[TMP23]], 2
 ; CHECK-NEXT: br i1 [[TMP24]], label [[G_EXIT:%.*]], label [[TMP25:%.*]]
 ; CHECK: 25:
@@ -60,7 +60,7 @@ define void @H(ptr noalias nocapture readnone, [2 x i64], ptr %ptr, i32 signext
 ; CHECK-NEXT: call void @D(ptr nonnull sret([[CLASS_B]]) [[TMP3]], ptr nonnull dereferenceable(32) [[PTR2:%.*]])
 ; CHECK-NEXT: br label [[G_EXIT]]
; CHECK: G.exit: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[TMP3]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP3]]) ; CHECK-NEXT: br label [[FOO]] ; CHECK: foo: ; CHECK-NEXT: ret void @@ -105,7 +105,7 @@ a.exit: %22 = phi i64 [ -1, %12 ], [ %21, %18 ] %23 = load ptr, ptr %13, align 8 %24 = sub nsw i64 %22, %14 - call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3) + call void @llvm.lifetime.start.p0(ptr nonnull %3) %25 = icmp ult i64 %24, 2 br i1 %25, label %G.exit, label %26 @@ -122,7 +122,7 @@ a.exit: br label %G.exit G.exit: - call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3) + call void @llvm.lifetime.end.p0(ptr nonnull %3) br label %foo foo: @@ -133,10 +133,10 @@ foo: declare ptr @memchr(ptr, i32 signext, i64) local_unnamed_addr ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; CHECK-MODIFY-CFG: {{.*}} ; CHECK-PRESERVE-CFG: {{.*}} diff --git a/llvm/test/Transforms/SROA/ignore-droppable.ll b/llvm/test/Transforms/SROA/ignore-droppable.ll index 9c95dc0..ba581bb 100644 --- a/llvm/test/Transforms/SROA/ignore-droppable.ll +++ b/llvm/test/Transforms/SROA/ignore-droppable.ll @@ -3,8 +3,8 @@ ; RUN: opt < %s -passes='sroa<modify-cfg>' -S | FileCheck %s --check-prefixes=CHECK,CHECK-MODIFY-CFG declare void @llvm.assume(i1) -declare void @llvm.lifetime.start.p0(i64 %size, ptr nocapture %ptr) -declare void @llvm.lifetime.end.p0(i64 %size, ptr nocapture %ptr) +declare void @llvm.lifetime.start.p0(ptr nocapture %ptr) +declare void @llvm.lifetime.end.p0(ptr nocapture %ptr) define void @positive_assume_uses(ptr %arg) { ; CHECK-LABEL: @positive_assume_uses( @@ -55,10 +55,10 @@ define void @positive_gep_assume_uses() { ; %A = alloca {i8, i16} %B = getelementptr {i8, i16}, ptr %A, i32 0, i32 0 - call void @llvm.lifetime.start.p0(i64 2, ptr %A) + call void @llvm.lifetime.start.p0(ptr %A) call void @llvm.assume(i1 true) ["align"(ptr %B, i64 8), "align"(ptr %B, i64 16)] store {i8, i16} zeroinitializer, ptr %A - call void @llvm.lifetime.end.p0(i64 2, ptr %A) + call void @llvm.lifetime.end.p0(ptr %A) call void @llvm.assume(i1 true) ["nonnull"(ptr %B), "align"(ptr %B, i64 2)] ret void } @@ -71,10 +71,10 @@ define void @positive_mixed_assume_uses() { ; CHECK-NEXT: ret void ; %A = alloca i8 - call void @llvm.lifetime.start.p0(i64 2, ptr %A) + call void @llvm.lifetime.start.p0(ptr %A) call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 8), "align"(ptr %A, i64 16)] store i8 1, ptr %A - call void @llvm.lifetime.end.p0(i64 2, ptr %A) + call void @llvm.lifetime.end.p0(ptr %A) call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)] call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)] ret void diff --git a/llvm/test/Transforms/SROA/lifetime-intrinsic.ll b/llvm/test/Transforms/SROA/lifetime-intrinsic.ll index b9e8873..668903d 100644 --- a/llvm/test/Transforms/SROA/lifetime-intrinsic.ll +++ b/llvm/test/Transforms/SROA/lifetime-intrinsic.ll @@ -18,14 +18,14 @@ define i16 @with_lifetime(i32 %a, i32 %b) #0 { ; CHECK-NEXT: ret i16 [[RET]] ; %arr = alloca %i32x2, align 4 - call void @llvm.lifetime.start.p0(i64 8, ptr %arr) + call 
void @llvm.lifetime.start.p0(ptr %arr) %p1 = getelementptr inbounds %i32x2, ptr %arr, i64 0, i32 0, i32 1 store i32 %a, ptr %arr, align 4 store i32 %b, ptr %p1, align 4 %s0 = load i16, ptr %arr, align 4 %s2 = load i16, ptr %p1, align 4 %ret = add i16 %s0, %s2 - call void @llvm.lifetime.end.p0(i64 8, ptr %arr) + call void @llvm.lifetime.end.p0(ptr %arr) ret i16 %ret } @@ -50,9 +50,9 @@ define i16 @no_lifetime(i32 %a, i32 %b) #0 { ret i16 %ret } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { alwaysinline nounwind } attributes #1 = { argmemonly nounwind } diff --git a/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll b/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll index 13808b2..b86f41b 100644 --- a/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll +++ b/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll @@ -791,13 +791,13 @@ entry: ret i32 0 } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr) +declare void @llvm.lifetime.start.p0(ptr) define i64 @do_schedule_instrs_for_dce_after_fixups() { ; CHECK-LABEL: @do_schedule_instrs_for_dce_after_fixups( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[C:%.*]] = alloca i64, align 2 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[C]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C]]) ; CHECK-NEXT: store i64 0, ptr [[C]], align 4 ; CHECK-NEXT: br label [[IF_END:%.*]] ; CHECK: if.end: @@ -807,7 +807,7 @@ define i64 @do_schedule_instrs_for_dce_after_fixups() { ; entry: %c = alloca i64, align 2 - call void @llvm.lifetime.start.p0(i64 1, ptr %c) + call void @llvm.lifetime.start.p0(ptr %c) store i64 0, ptr %c br label %if.end diff --git a/llvm/test/Transforms/SROA/pr26972.ll b/llvm/test/Transforms/SROA/pr26972.ll index a2872c7..526db3c 100644 --- a/llvm/test/Transforms/SROA/pr26972.ll +++ b/llvm/test/Transforms/SROA/pr26972.ll @@ -12,11 +12,11 @@ define void @fn1() { ; CHECK-NEXT: ret void ; %a = alloca [1073741825 x i32], align 16 - call void @llvm.lifetime.end.p0(i64 4294967300, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret void } -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: ; CHECK-MODIFY-CFG: {{.*}} ; CHECK-PRESERVE-CFG: {{.*}} diff --git a/llvm/test/Transforms/SROA/readonlynocapture.ll b/llvm/test/Transforms/SROA/readonlynocapture.ll index 5752fadd..b6f7b1f 100644 --- a/llvm/test/Transforms/SROA/readonlynocapture.ll +++ b/llvm/test/Transforms/SROA/readonlynocapture.ll @@ -284,25 +284,25 @@ define void @incompletestruct(i1 %b, i1 %c) { ; CHECK-LABEL: @incompletestruct( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[LII:%.*]] = alloca [[STRUCT_LOADIMMEDIATEINFO:%.*]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[LII]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[LII]]) ; CHECK-NEXT: [[BF_CLEAR4:%.*]] = and i32 undef, -262144 ; CHECK-NEXT: [[BF_SET5:%.*]] = select i1 [[B:%.*]], i32 196608, i32 131072 ; CHECK-NEXT: [[BF_SET12:%.*]] = or disjoint i32 [[BF_SET5]], [[BF_CLEAR4]] ; CHECK-NEXT: store i32 [[BF_SET12]], ptr [[LII]], align 4 ; CHECK-NEXT: call void @callee(ptr [[LII]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[LII]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[LII]]) ; CHECK-NEXT: ret void ; entry: %LII = alloca %struct.LoadImmediateInfo, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %LII) + call void @llvm.lifetime.start.p0(ptr nonnull %LII) %bf.load = load i32, ptr %LII, align 4 %bf.clear4 = and i32 %bf.load, -262144 %bf.set5 = select i1 %b, i32 196608, i32 131072 %bf.set12 = or disjoint i32 %bf.set5, %bf.clear4 store i32 %bf.set12, ptr %LII, align 4 call void @callee(ptr %LII) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %LII) + call void @llvm.lifetime.end.p0(ptr nonnull %LII) ret void } @@ -312,13 +312,13 @@ define void @incompletestruct_bb(i1 %b, i1 %c) { ; CHECK-NEXT: [[LII:%.*]] = alloca [[STRUCT_LOADIMMEDIATEINFO:%.*]], align 4 ; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]] ; CHECK: if.then: -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[LII]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[LII]]) ; CHECK-NEXT: [[BF_CLEAR4:%.*]] = and i32 undef, -262144 ; CHECK-NEXT: [[BF_SET5:%.*]] = select i1 [[B:%.*]], i32 196608, i32 131072 ; CHECK-NEXT: [[BF_SET12:%.*]] = or disjoint i32 [[BF_SET5]], [[BF_CLEAR4]] ; CHECK-NEXT: store i32 [[BF_SET12]], ptr [[LII]], align 4 ; CHECK-NEXT: call void @callee(ptr [[LII]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[LII]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[LII]]) ; CHECK-NEXT: br label [[IF_END]] ; CHECK: if.end: ; CHECK-NEXT: ret void @@ -328,14 +328,14 @@ entry: br i1 %c, label %if.then, label %if.end if.then: ; preds = %entry - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %LII) + call void @llvm.lifetime.start.p0(ptr nonnull %LII) %bf.load = load i32, ptr %LII, align 4 %bf.clear4 = and i32 %bf.load, -262144 %bf.set5 = select i1 %b, i32 196608, i32 131072 %bf.set12 = or disjoint i32 %bf.set5, %bf.clear4 store i32 %bf.set12, ptr %LII, align 4 call void @callee(ptr %LII) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %LII) + call void @llvm.lifetime.end.p0(ptr nonnull %LII) br label %if.end if.end: ; preds = %if.then, %entry @@ -459,35 +459,35 @@ define i32 @provenance_only_capture() { define i32 @simple_with_lifetimes() { ; CHECK-LABEL: @simple_with_lifetimes( ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) ; 
CHECK-NEXT: store i32 0, ptr [[A]], align 4 ; CHECK-NEXT: call void @callee(ptr [[A]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret i32 0 ; %a = alloca i32 - call void @llvm.lifetime.start(i64 4, ptr %a) + call void @llvm.lifetime.start(ptr %a) store i32 0, ptr %a call void @callee(ptr %a) %l1 = load i32, ptr %a - call void @llvm.lifetime.end(i64 4, ptr %a) + call void @llvm.lifetime.end(ptr %a) ret i32 %l1 } define i32 @twoalloc_with_lifetimes() { ; CHECK-LABEL: @twoalloc_with_lifetimes( ; CHECK-NEXT: [[A:%.*]] = alloca { i32, i32 }, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) ; CHECK-NEXT: store i32 0, ptr [[A]], align 4 ; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[A]], i32 1 ; CHECK-NEXT: store i32 1, ptr [[B]], align 4 ; CHECK-NEXT: call void @callee(ptr [[A]]) ; CHECK-NEXT: [[R:%.*]] = add i32 0, 1 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret i32 [[R]] ; %a = alloca {i32, i32} - call void @llvm.lifetime.start(i64 8, ptr %a) + call void @llvm.lifetime.start(ptr %a) store i32 0, ptr %a %b = getelementptr i32, ptr %a, i32 1 store i32 1, ptr %b @@ -495,7 +495,7 @@ define i32 @twoalloc_with_lifetimes() { %l1 = load i32, ptr %a %l2 = load i32, ptr %b %r = add i32 %l1, %l2 - call void @llvm.lifetime.end(i64 8, ptr %a) + call void @llvm.lifetime.end(ptr %a) ret i32 %r } diff --git a/llvm/test/Transforms/SROA/select-load.ll b/llvm/test/Transforms/SROA/select-load.ll index 9de7650..359ecaa 100644 --- a/llvm/test/Transforms/SROA/select-load.ll +++ b/llvm/test/Transforms/SROA/select-load.ll @@ -118,7 +118,7 @@ define i32 @interfering_lifetime(ptr %data, i64 %indvars.iv) { %min = alloca i32, align 4 %arrayidx = getelementptr inbounds i32, ptr %data, i64 %indvars.iv %i1 = load i32, ptr %arrayidx, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %min) + call void @llvm.lifetime.start.p0(ptr %min) store i32 0, ptr %min, align 4 %cmp.i.i = icmp slt i32 %i1, 0 %__b.__a.i.i = select i1 %cmp.i.i, ptr %min, ptr %arrayidx @@ -132,9 +132,9 @@ define i32 @clamp_load_to_constant_range(ptr %data, i64 %indvars.iv) { ; CHECK-PRESERVE-CFG-NEXT: [[MIN:%.*]] = alloca i32, align 4 ; CHECK-PRESERVE-CFG-NEXT: [[MAX:%.*]] = alloca i32, align 4 ; CHECK-PRESERVE-CFG-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 [[INDVARS_IV:%.*]] -; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[MIN]]) +; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(ptr [[MIN]]) ; CHECK-PRESERVE-CFG-NEXT: store i32 0, ptr [[MIN]], align 4 -; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[MAX]]) +; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(ptr [[MAX]]) ; CHECK-PRESERVE-CFG-NEXT: store i32 4095, ptr [[MAX]], align 4 ; CHECK-PRESERVE-CFG-NEXT: [[I1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-PRESERVE-CFG-NEXT: [[CMP_I_I:%.*]] = icmp slt i32 [[I1]], 0 @@ -167,9 +167,9 @@ define i32 @clamp_load_to_constant_range(ptr %data, i64 %indvars.iv) { %min = alloca i32, align 4 %max = alloca i32, align 4 %arrayidx = getelementptr inbounds i32, ptr %data, i64 %indvars.iv - call void @llvm.lifetime.start.p0(i64 4, ptr %min) + call void @llvm.lifetime.start.p0(ptr %min) store i32 0, ptr %min, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %max) + call void 
@llvm.lifetime.start.p0(ptr %max) store i32 4095, ptr %max, align 4 %i1 = load i32, ptr %arrayidx, align 4 %cmp.i.i = icmp slt i32 %i1, 0 @@ -482,6 +482,6 @@ define void @load_of_select_with_noundef_nonnull(ptr %buffer, i1 %b) { ; Ensure that the branch metadata is reversed to match the reversals above. -declare void @llvm.lifetime.start.p0(i64, ptr ) -declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.start.p0(ptr ) +declare void @llvm.lifetime.end.p0(ptr) declare i32 @llvm.smax.i32(i32, i32) diff --git a/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll b/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll index 561315b..60228a4 100644 --- a/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll +++ b/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll @@ -5,10 +5,10 @@ target datalayout = "e-p:64:32-i64:32-v32:32-n32-S64" ; Function Attrs: nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0 +declare void @llvm.lifetime.start.p0(ptr nocapture) #0 ; Function Attrs: nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0 +declare void @llvm.lifetime.end.p0(ptr nocapture) #0 define void @wombat(<4 x float> %arg1) { ; CHECK-LABEL: @wombat( @@ -19,10 +19,10 @@ define void @wombat(<4 x float> %arg1) { ; bb: %tmp = alloca <4 x float>, align 16 - call void @llvm.lifetime.start.p0(i64 16, ptr %tmp) + call void @llvm.lifetime.start.p0(ptr %tmp) store <4 x float> %arg1, ptr %tmp, align 16 %tmp18 = load <3 x float>, ptr %tmp - call void @llvm.lifetime.end.p0(i64 16, ptr %tmp) + call void @llvm.lifetime.end.p0(ptr %tmp) call void @wombat3(<3 x float> %tmp18) ret void } diff --git a/llvm/test/Transforms/SROA/vector-promotion.ll b/llvm/test/Transforms/SROA/vector-promotion.ll index ffa758e..682e8e3 100644 --- a/llvm/test/Transforms/SROA/vector-promotion.ll +++ b/llvm/test/Transforms/SROA/vector-promotion.ll @@ -1534,7 +1534,7 @@ bb.5: } declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1) -declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.end.p0(ptr) ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: ; CHECK-MODIFY-CFG: {{.*}} ; CHECK-PRESERVE-CFG: {{.*}} diff --git a/llvm/test/Transforms/SafeStack/ARM/debug.ll b/llvm/test/Transforms/SafeStack/ARM/debug.ll index a8c534c..207475a 100644 --- a/llvm/test/Transforms/SafeStack/ARM/debug.ll +++ b/llvm/test/Transforms/SafeStack/ARM/debug.ll @@ -29,15 +29,15 @@ entry: define void @f() local_unnamed_addr #1 !dbg !27 { entry: %c = alloca [16 x i8], align 1 - call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %c) #5, !dbg !35 + call void @llvm.lifetime.start.p0(ptr nonnull %c) #5, !dbg !35 call void @llvm.dbg.declare(metadata ptr %c, metadata !31, metadata !DIExpression()), !dbg !36 call void @Capture(ptr nonnull %c) #5, !dbg !37 - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %c) #5, !dbg !38 + call void @llvm.lifetime.end.p0(ptr nonnull %c) #5, !dbg !38 ret void, !dbg !38 } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: nounwind readnone speculatable declare void @llvm.dbg.declare(metadata, metadata, metadata) #3 @@ -45,7 +45,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #3 declare void @Capture(ptr) local_unnamed_addr #4 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 attributes #0 = { norecurse nounwind readonly safestack "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv7-a,+dsp,+neon,+vfp3,-thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind safestack "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv7-a,+dsp,+neon,+vfp3,-thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/llvm/test/Transforms/SafeStack/X86/call.ll b/llvm/test/Transforms/SafeStack/X86/call.ll index 9592b33..f14e676 100644 --- a/llvm/test/Transforms/SafeStack/X86/call.ll +++ b/llvm/test/Transforms/SafeStack/X86/call.ll @@ -152,8 +152,8 @@ define void @call_lifetime(ptr %p) { ; CHECK: ret void entry: %q = alloca [100 x i8], align 16 - call void @llvm.lifetime.start.p0(i64 100, ptr %q) - call void @llvm.lifetime.end.p0(i64 100, ptr %q) + call void @llvm.lifetime.start.p0(ptr %q) + call void @llvm.lifetime.end.p0(ptr %q) ret void } @@ -167,5 +167,5 @@ declare void @readnone0(ptr nocapture readnone, ptr nocapture) declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind argmemonly -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind argmemonly -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind argmemonly +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly diff --git a/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll b/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll index 8ff369e..5192e47 100644 --- 
a/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll +++ b/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll @@ -14,19 +14,19 @@ entry: %x = alloca i64, align 8 %y = alloca i64, align 8 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -16 call void @capture64(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -16 call void @capture64(ptr %y) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) ret void } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @capture64(ptr) diff --git a/llvm/test/Transforms/SafeStack/X86/coloring.ll b/llvm/test/Transforms/SafeStack/X86/coloring.ll index 22e1487..288ae00 100644 --- a/llvm/test/Transforms/SafeStack/X86/coloring.ll +++ b/llvm/test/Transforms/SafeStack/X86/coloring.ll @@ -11,30 +11,30 @@ entry: %x = alloca i32, align 4 %x1 = alloca i32, align 4 %x2 = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) ; CHECK: %[[A1:.*]] = getelementptr i8, ptr %[[USP]], i32 -4 ; CHECK: call void @capture(ptr nonnull %[[A1]]) call void @capture(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 4, ptr %x) - call void @llvm.lifetime.start.p0(i64 4, ptr %x1) + call void @llvm.lifetime.end.p0(ptr %x) + call void @llvm.lifetime.start.p0(ptr %x1) ; CHECK: %[[B1:.*]] = getelementptr i8, ptr %[[USP]], i32 -4 ; CHECK: call void @capture(ptr nonnull %[[B1]]) call void @capture(ptr nonnull %x1) - call void @llvm.lifetime.end.p0(i64 4, ptr %x1) - call void @llvm.lifetime.start.p0(i64 4, ptr %x2) + call void @llvm.lifetime.end.p0(ptr %x1) + call void @llvm.lifetime.start.p0(ptr %x2) ; CHECK: %[[C1:.*]] = getelementptr i8, ptr %[[USP]], i32 -4 ; CHECK: call void @capture(ptr nonnull %[[C1]]) call void @capture(ptr nonnull %x2) - call void @llvm.lifetime.end.p0(i64 4, ptr %x2) + call void @llvm.lifetime.end.p0(ptr %x2) ret void } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @capture(ptr) diff --git a/llvm/test/Transforms/SafeStack/X86/coloring2.ll b/llvm/test/Transforms/SafeStack/X86/coloring2.ll index ae5f375..a4157cb 100644 --- a/llvm/test/Transforms/SafeStack/X86/coloring2.ll +++ b/llvm/test/Transforms/SafeStack/X86/coloring2.ll @@ -14,21 +14,21 @@ entry: %y = alloca i32, align 4 %z = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %z) - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %z) + call void @llvm.lifetime.start.p0(ptr %x) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %x) + call void @llvm.lifetime.start.p0(ptr %y) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 call void @capture32(ptr %y) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) ; CHECK: 
getelementptr i8, ptr %[[USP]], i32 -8 call void @capture32(ptr %z) - call void @llvm.lifetime.end.p0(i64 -1, ptr %z) + call void @llvm.lifetime.end.p0(ptr %z) ret void } @@ -42,11 +42,11 @@ entry: %x = alloca i32, align 4 %y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -8 call void @capture32(ptr %y) @@ -65,21 +65,21 @@ entry: %y = alloca i32, align 4 %z = alloca i64, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %x) + call void @llvm.lifetime.start.p0(ptr %y) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -8 call void @capture32(ptr %y) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) - call void @llvm.lifetime.start.p0(i64 -1, ptr %z) + call void @llvm.lifetime.end.p0(ptr %y) + call void @llvm.lifetime.start.p0(ptr %z) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -8 call void @capture64(ptr %z) - call void @llvm.lifetime.end.p0(i64 -1, ptr %z) + call void @llvm.lifetime.end.p0(ptr %z) ret void } @@ -95,9 +95,9 @@ entry: %z = alloca i64, align 4 %y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) - call void @llvm.lifetime.start.p0(i64 -1, ptr %z) + call void @llvm.lifetime.start.p0(ptr %x) + call void @llvm.lifetime.start.p0(ptr %y) + call void @llvm.lifetime.start.p0(ptr %z) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -16 call void @capture32(ptr %x) @@ -108,9 +108,9 @@ entry: ; CHECK: getelementptr i8, ptr %[[USP]], i32 -8 call void @capture64(ptr %z) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) - call void @llvm.lifetime.end.p0(i64 -1, ptr %z) + call void @llvm.lifetime.end.p0(ptr %x) + call void @llvm.lifetime.end.p0(ptr %y) + call void @llvm.lifetime.end.p0(ptr %z) ret void } @@ -147,8 +147,8 @@ entry: %z = alloca i64, align 8 %z1 = alloca i64, align 8 %z2 = alloca i64, align 8 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x1) - call void @llvm.lifetime.start.p0(i64 -1, ptr %x2) + call void @llvm.lifetime.start.p0(ptr %x1) + call void @llvm.lifetime.start.p0(ptr %x2) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -8 ; CHECK: call void @capture64( call void @capture64(ptr nonnull %x1) @@ -158,62 +158,62 @@ entry: br i1 %a, label %if.then, label %if.else4 if.then: ; preds = %entry - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -24 ; CHECK: call void @capture64( call void @capture64(ptr nonnull %y) br i1 %b, label %if.then3, label %if.else if.then3: ; preds = %if.then - call void @llvm.lifetime.start.p0(i64 -1, ptr %y1) + call void @llvm.lifetime.start.p0(ptr %y1) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -32 ; CHECK: call void @capture64( call void @capture64(ptr nonnull %y1) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y1) + call void @llvm.lifetime.end.p0(ptr %y1) br label %if.end if.else: ; preds = %if.then - call void @llvm.lifetime.start.p0(i64 -1, ptr %y2) + call void @llvm.lifetime.start.p0(ptr 
%y2) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -32 ; CHECK: call void @capture64( call void @capture64(ptr nonnull %y2) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y2) + call void @llvm.lifetime.end.p0(ptr %y2) br label %if.end if.end: ; preds = %if.else, %if.then3 - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) br label %if.end9 if.else4: ; preds = %entry - call void @llvm.lifetime.start.p0(i64 -1, ptr %z) + call void @llvm.lifetime.start.p0(ptr %z) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -24 ; CHECK: call void @capture64( call void @capture64(ptr nonnull %z) br i1 %b, label %if.then6, label %if.else7 if.then6: ; preds = %if.else4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %z1) + call void @llvm.lifetime.start.p0(ptr %z1) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -32 ; CHECK: call void @capture64( call void @capture64(ptr nonnull %z1) - call void @llvm.lifetime.end.p0(i64 -1, ptr %z1) + call void @llvm.lifetime.end.p0(ptr %z1) br label %if.end8 if.else7: ; preds = %if.else4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %z2) + call void @llvm.lifetime.start.p0(ptr %z2) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -32 ; CHECK: call void @capture64( call void @capture64(ptr nonnull %z2) - call void @llvm.lifetime.end.p0(i64 -1, ptr %z2) + call void @llvm.lifetime.end.p0(ptr %z2) br label %if.end8 if.end8: ; preds = %if.else7, %if.then6 - call void @llvm.lifetime.end.p0(i64 -1, ptr %z) + call void @llvm.lifetime.end.p0(ptr %z) br label %if.end9 if.end9: ; preds = %if.end8, %if.end - call void @llvm.lifetime.end.p0(i64 -1, ptr %x2) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x1) + call void @llvm.lifetime.end.p0(ptr %x2) + call void @llvm.lifetime.end.p0(ptr %x1) ret void } @@ -225,21 +225,21 @@ entry: ; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16 %x = alloca i32, align 4 %y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 ; CHECK: call void @capture32( call void @capture32(ptr %x) br i1 %d, label %bb2, label %bb3 bb2: - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -8 ; CHECK: call void @capture32( call void @capture32(ptr %y) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %y) + call void @llvm.lifetime.end.p0(ptr %x) ret void bb3: - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) ret void } @@ -250,18 +250,18 @@ entry: ; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16 %x = alloca i32, align 4 %y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 ; CHECK: call void @capture32( call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) br i1 %d, label %bb2, label %bb3 bb2: - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 ; CHECK: call void @capture32( call void @capture32(ptr %y) - call void @llvm.lifetime.end.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) ret void bb3: ret void @@ -275,14 +275,14 @@ entry: ; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16 %x = alloca i32, align 4 %y = alloca 
i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 ; CHECK: call void @capture32( call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) br i1 %d, label %bb2, label %bb3 bb2: - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 ; CHECK: call void @capture32( call void @capture32(ptr %y) @@ -299,14 +299,14 @@ entry: ; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16 %x = alloca i32, align 4 %y = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 ; CHECK: call void @capture32( call void @capture32(ptr %x) br i1 %d, label %bb2, label %bb3 bb2: - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.end.p0(ptr %x) + call void @llvm.lifetime.start.p0(ptr %y) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 ; CHECK: call void @capture32( call void @capture32(ptr %y) @@ -326,10 +326,10 @@ entry: ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 ; CHECK: call void @capture32( call void @capture32(ptr %x) - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) br i1 %d, label %bb2, label %bb3 bb2: - call void @llvm.lifetime.start.p0(i64 -1, ptr %y) + call void @llvm.lifetime.start.p0(ptr %y) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -8 ; CHECK: call void @capture32( call void @capture32(ptr %y) @@ -347,26 +347,26 @@ entry: %B.i2 = alloca [100 x i32], align 4 %A.i = alloca [100 x i32], align 4 %B.i = alloca [100 x i32], align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i) - call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i) + call void @llvm.lifetime.start.p0(ptr %A.i) + call void @llvm.lifetime.start.p0(ptr %B.i) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -400 ; CHECK: call void @capture100x32( call void @capture100x32(ptr %A.i) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -800 ; CHECK: call void @capture100x32( call void @capture100x32(ptr %B.i) - call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i) - call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i) - call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i1) - call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i2) + call void @llvm.lifetime.end.p0(ptr %A.i) + call void @llvm.lifetime.end.p0(ptr %B.i) + call void @llvm.lifetime.start.p0(ptr %A.i1) + call void @llvm.lifetime.start.p0(ptr %B.i2) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -400 ; CHECK: call void @capture100x32( call void @capture100x32(ptr %A.i1) ; CHECK: getelementptr i8, ptr %[[USP]], i32 -800 ; CHECK: call void @capture100x32( call void @capture100x32(ptr %B.i2) - call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i1) - call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i2) + call void @llvm.lifetime.end.p0(ptr %A.i1) + call void @llvm.lifetime.end.p0(ptr %B.i2) ret void } @@ -378,11 +378,11 @@ entry: %buf1 = alloca i8, i32 100000, align 16 %buf2 = alloca i8, i32 100000, align 16 - call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1) - call void @llvm.lifetime.end.p0(i64 -1, ptr %buf1) + call void @llvm.lifetime.start.p0(ptr %buf1) + call void @llvm.lifetime.end.p0(ptr %buf1) - call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1) - call void @llvm.lifetime.start.p0(i64 -1, ptr %buf2) + call void 
@llvm.lifetime.start.p0(ptr %buf1) + call void @llvm.lifetime.start.p0(ptr %buf2) call void @capture8(ptr %buf1) call void @capture8(ptr %buf2) ret void @@ -404,12 +404,12 @@ entry: %B.i2 = alloca [100 x i32], align 4 %A.i = alloca [100 x i32], align 4 %B.i = alloca [100 x i32], align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i) nounwind - call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i) nounwind + call void @llvm.lifetime.start.p0(ptr %A.i) nounwind + call void @llvm.lifetime.start.p0(ptr %B.i) nounwind call void @capture100x32(ptr %A.i) call void @capture100x32(ptr %B.i) - call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i) nounwind - call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i) nounwind + call void @llvm.lifetime.end.p0(ptr %A.i) nounwind + call void @llvm.lifetime.end.p0(ptr %B.i) nounwind br label %block2 block2: @@ -429,13 +429,13 @@ entry: %a.i = alloca [4 x %struct.Klass], align 16 %b.i = alloca [4 x %struct.Klass], align 16 ; I am used outside the lifetime zone below: - call void @llvm.lifetime.start.p0(i64 -1, ptr %a.i) - call void @llvm.lifetime.start.p0(i64 -1, ptr %b.i) + call void @llvm.lifetime.start.p0(ptr %a.i) + call void @llvm.lifetime.start.p0(ptr %b.i) call void @capture8(ptr %a.i) call void @capture8(ptr %b.i) %z3 = load i32, ptr %a.i, align 16 - call void @llvm.lifetime.end.p0(i64 -1, ptr %a.i) - call void @llvm.lifetime.end.p0(i64 -1, ptr %b.i) + call void @llvm.lifetime.end.p0(ptr %a.i) + call void @llvm.lifetime.end.p0(ptr %b.i) ret i32 %z3 } @@ -445,12 +445,12 @@ entry: ; CHECK: %[[USP:.*]] = load ptr, ptr @__safestack_unsafe_stack_ptr ; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16 %x = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %x) nounwind + call void @llvm.lifetime.start.p0(ptr %x) nounwind br label %l2 l2: call void @capture8(ptr %x) - call void @llvm.lifetime.end.p0(i64 4, ptr %x) nounwind + call void @llvm.lifetime.end.p0(ptr %x) nounwind br label %l2 } @@ -463,25 +463,25 @@ entry: ; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16 %x = alloca i8, align 4 %y = alloca i8, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %x) nounwind + call void @llvm.lifetime.start.p0(ptr %x) nounwind br label %l2 l2: ; CHECK: getelementptr i8, ptr %[[USP]], i32 -8 - call void @llvm.lifetime.start.p0(i64 4, ptr %y) nounwind + call void @llvm.lifetime.start.p0(ptr %y) nounwind call void @capture8(ptr %y) - call void @llvm.lifetime.end.p0(i64 4, ptr %y) nounwind + call void @llvm.lifetime.end.p0(ptr %y) nounwind ; CHECK: getelementptr i8, ptr %[[USP]], i32 -4 - call void @llvm.lifetime.start.p0(i64 4, ptr %x) nounwind + call void @llvm.lifetime.start.p0(ptr %x) nounwind call void @capture8(ptr %x) br label %l2 } attributes #0 = { safestack } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @capture8(ptr) declare void @capture32(ptr) declare void @capture64(ptr) diff --git a/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll b/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll index 7a1fdc0..e60522f 100644 --- a/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll +++ b/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll @@ -43,12 +43,12 @@ entry: } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @capture(ptr) #2 ; Function Attrs: 
argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 ; Function Attrs: nounwind readnone declare void @llvm.dbg.value(metadata, metadata, metadata) #3 diff --git a/llvm/test/Transforms/SafeStack/X86/layout-frag.ll b/llvm/test/Transforms/SafeStack/X86/layout-frag.ll index b858fd6..8a5362b 100644 --- a/llvm/test/Transforms/SafeStack/X86/layout-frag.ll +++ b/llvm/test/Transforms/SafeStack/X86/layout-frag.ll @@ -13,16 +13,16 @@ entry: %x2 = alloca i64, align 8 - call void @llvm.lifetime.start.p0(i64 8, ptr %x0) + call void @llvm.lifetime.start.p0(ptr %x0) call void @capture64(ptr %x0) - call void @llvm.lifetime.end.p0(i64 8, ptr %x0) + call void @llvm.lifetime.end.p0(ptr %x0) - call void @llvm.lifetime.start.p0(i64 1, ptr %x1) - call void @llvm.lifetime.start.p0(i64 8, ptr %x2) + call void @llvm.lifetime.start.p0(ptr %x1) + call void @llvm.lifetime.start.p0(ptr %x2) call void @capture8(ptr %x1) call void @capture64(ptr %x2) - call void @llvm.lifetime.end.p0(i64 1, ptr %x1) - call void @llvm.lifetime.end.p0(i64 8, ptr %x2) + call void @llvm.lifetime.end.p0(ptr %x1) + call void @llvm.lifetime.end.p0(ptr %x2) ; Test that i64 allocas share space. ; CHECK: getelementptr i8, ptr %unsafe_stack_ptr, i32 -8 @@ -32,7 +32,7 @@ entry: ret void } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @capture8(ptr) declare void @capture64(ptr) diff --git a/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll b/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll index 76c638e..c01ca2f 100644 --- a/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll +++ b/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll @@ -9,9 +9,9 @@ define dso_local void @_ZN1s1tE1F(ptr byval(%class.F) %g) local_unnamed_addr saf entry: %ref.tmp.i.i.i = alloca i64, align 1 call void undef(ptr %g) - call void @llvm.lifetime.start.p0(i64 3, ptr %ref.tmp.i.i.i) + call void @llvm.lifetime.start.p0(ptr %ref.tmp.i.i.i) ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 diff --git a/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll b/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll index ba66548..3ae0aea 100644 --- a/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll +++ b/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll @@ -61,10 +61,10 @@ entry: } ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3 +declare void @llvm.lifetime.start.p0(ptr nocapture) #3 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3 +declare void @llvm.lifetime.end.p0(ptr nocapture) #3 ; Function Attrs: nounwind uwtable define dso_local i32 @main() #0 !dbg !28 { diff --git a/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll b/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll index 5fe80db..c7617c1 100644 --- a/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll +++ b/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll @@ -43,7 +43,7 @@ entry: %a = alloca i32, align 4 store 
ptr %p, ptr %p.addr, align 8, !tbaa !15 call void @llvm.dbg.declare(metadata ptr %p.addr, metadata !33, metadata !DIExpression()), !dbg !35 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) #4, !dbg !36 + call void @llvm.lifetime.start.p0(ptr %a) #4, !dbg !36 call void @llvm.dbg.declare(metadata ptr %a, metadata !34, metadata !DIExpression()), !dbg !37 %0 = load ptr, ptr %p.addr, align 8, !dbg !38, !tbaa !15 %arrayidx = getelementptr inbounds i32, ptr %0, i64 3, !dbg !38 @@ -58,7 +58,7 @@ entry: store i32 %call, ptr %a, align 4, !dbg !43, !tbaa !25 %5 = load i32, ptr %a, align 4, !dbg !44, !tbaa !25 %add2 = add nsw i32 %5, 1, !dbg !45 - call void @llvm.lifetime.end.p0(i64 4, ptr %a) #4, !dbg !46 + call void @llvm.lifetime.end.p0(ptr %a) #4, !dbg !46 ret i32 %add2, !dbg !47 } @@ -86,10 +86,10 @@ entry: } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 declare void @baz(...) #3 diff --git a/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll b/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll index b8e1064..0e62921 100644 --- a/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll +++ b/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll @@ -53,7 +53,7 @@ entry: %a = alloca i32, align 4 store ptr %p, ptr %p.addr, align 8, !tbaa !15 call void @llvm.dbg.declare(metadata ptr %p.addr, metadata !33, metadata !DIExpression()), !dbg !35 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) #4, !dbg !36 + call void @llvm.lifetime.start.p0(ptr %a) #4, !dbg !36 call void @llvm.dbg.declare(metadata ptr %a, metadata !34, metadata !DIExpression()), !dbg !37 %0 = load ptr, ptr %p.addr, align 8, !dbg !38, !tbaa !15 %arrayidx = getelementptr inbounds i32, ptr %0, i64 3, !dbg !38 @@ -68,7 +68,7 @@ entry: store i32 %call, ptr %a, align 4, !dbg !43, !tbaa !25 %5 = load i32, ptr %a, align 4, !dbg !44, !tbaa !25 %add2 = add nsw i32 %5, 1, !dbg !45 - call void @llvm.lifetime.end.p0(i64 4, ptr %a) #4, !dbg !46 + call void @llvm.lifetime.end.p0(ptr %a) #4, !dbg !46 ret i32 %add2, !dbg !47 } @@ -96,10 +96,10 @@ entry: } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 declare void @baz(...) 
#3 diff --git a/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll b/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll index 3ca94a4..2b091a1 100644 --- a/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll +++ b/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll @@ -151,10 +151,10 @@ for.end: ; preds = %cleanup, %if.then } ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3 +declare void @llvm.lifetime.start.p0(ptr nocapture) #3 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3 +declare void @llvm.lifetime.end.p0(ptr nocapture) #3 attributes #0 = { noinline nounwind uwtable "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" } attributes #1 = { alwaysinline nounwind uwtable "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" } diff --git a/llvm/test/Transforms/SampleProfile/profile-mismatch.ll b/llvm/test/Transforms/SampleProfile/profile-mismatch.ll index 42bc1b8..0a1b896 100644 --- a/llvm/test/Transforms/SampleProfile/profile-mismatch.ll +++ b/llvm/test/Transforms/SampleProfile/profile-mismatch.ll @@ -43,13 +43,13 @@ define dso_local i32 @foo(i32 noundef %x) #0 !dbg !12 { entry: %y = alloca i32, align 4 call void @llvm.dbg.value(metadata i32 %x, metadata !16, metadata !DIExpression()), !dbg !18 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %y), !dbg !19 + call void @llvm.lifetime.start.p0(ptr nonnull %y), !dbg !19 call void @llvm.dbg.declare(metadata ptr %y, metadata !17, metadata !DIExpression()), !dbg !20 %add = add nsw i32 %x, 1, !dbg !21 store volatile i32 %add, ptr %y, align 4, !dbg !20, !tbaa !22 %y.0. 
= load volatile i32, ptr %y, align 4, !dbg !26, !tbaa !22 %add1 = add nsw i32 %y.0., 1, !dbg !27 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %y), !dbg !28 + call void @llvm.lifetime.end.p0(ptr nonnull %y), !dbg !28 ret i32 %add1, !dbg !29 } @@ -57,10 +57,10 @@ entry: declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 ; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 ; Function Attrs: noinline nounwind uwtable define dso_local i32 @bar(i32 noundef %x) #3 !dbg !30 { diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll index 6d4429b..26ae198 100644 --- a/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll +++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll @@ -31,8 +31,8 @@ bb3: } declare void @_Z3barv() #1 -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind argmemonly -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind argmemonly +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll index b662efa..383289e 100644 --- a/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll +++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll @@ -60,12 +60,12 @@ bb: %i3 = alloca i32, align 4 store i32 0, ptr %i, align 4 call void @llvm.pseudoprobe(i64 -2624081020897602054, i64 1, i32 0, i64 -1), !dbg !62 - call void @llvm.lifetime.start.p0(i64 8, ptr %i1), !dbg !62 + call void @llvm.lifetime.start.p0(ptr %i1), !dbg !62 call void @llvm.dbg.declare(metadata ptr %i1, metadata !57, metadata !DIExpression()), !dbg !63 - call void @llvm.lifetime.start.p0(i64 4, ptr %i2), !dbg !64 + call void @llvm.lifetime.start.p0(ptr %i2), !dbg !64 call void @llvm.dbg.declare(metadata ptr %i2, metadata !59, metadata !DIExpression()), !dbg !65 store i32 0, ptr %i2, align 4, !dbg !65, !tbaa !19 - call void @llvm.lifetime.start.p0(i64 4, ptr %i3), !dbg !66 + call void @llvm.lifetime.start.p0(ptr %i3), !dbg !66 call void @llvm.dbg.declare(metadata ptr %i3, metadata !60, metadata !DIExpression()), !dbg !67 store i32 0, ptr %i3, align 4, !dbg !67, !tbaa !19 br label %bb7, !dbg !66 @@ -78,7 +78,7 @@ bb7: ; preds = %bb25, %bb bb10: ; preds = %bb7 call void @llvm.pseudoprobe(i64 -2624081020897602054, i64 3, i32 0, i64 -1), !dbg !72 - call void 
@llvm.lifetime.end.p0(i64 4, ptr %i3), !dbg !72 + call void @llvm.lifetime.end.p0(ptr %i3), !dbg !72 br label %bb28 bb12: ; preds = %bb7 @@ -119,16 +119,16 @@ bb28: ; preds = %bb10 call void @llvm.pseudoprobe(i64 -2624081020897602054, i64 9, i32 0, i64 -1), !dbg !92 %i29 = load i32, ptr %i2, align 4, !dbg !92, !tbaa !19 %i30 = call i32 (ptr, ...) @printf(ptr @.str, i32 %i29), !dbg !93 - call void @llvm.lifetime.end.p0(i64 4, ptr %i2), !dbg !95 - call void @llvm.lifetime.end.p0(i64 8, ptr %i1), !dbg !95 + call void @llvm.lifetime.end.p0(ptr %i2), !dbg !95 + call void @llvm.lifetime.end.p0(ptr %i1), !dbg !95 ret i32 0, !dbg !96 } ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 declare dso_local i32 @printf(ptr, ...) diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll index 22317e6..e1d717c 100644 --- a/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll +++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll @@ -70,14 +70,14 @@ define dso_local i32 @foo(i32 noundef %x) #0 !dbg !16 { entry: %y = alloca i32, align 4 call void @llvm.dbg.value(metadata i32 %x, metadata !20, metadata !DIExpression()), !dbg !22 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %y), !dbg !23 + call void @llvm.lifetime.start.p0(ptr nonnull %y), !dbg !23 call void @llvm.dbg.declare(metadata ptr %y, metadata !21, metadata !DIExpression()), !dbg !24 call void @llvm.pseudoprobe(i64 6699318081062747564, i64 1, i32 0, i64 -1), !dbg !25 %add = add nsw i32 %x, 1, !dbg !26 store volatile i32 %add, ptr %y, align 4, !dbg !24, !tbaa !27 %y.0. 
= load volatile i32, ptr %y, align 4, !dbg !31, !tbaa !27 %add1 = add nsw i32 %y.0., 1, !dbg !32 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %y), !dbg !33 + call void @llvm.lifetime.end.p0(ptr nonnull %y), !dbg !33 ret i32 %add1, !dbg !34 } @@ -85,10 +85,10 @@ entry: declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 ; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 ; Function Attrs: noinline nounwind uwtable define dso_local i32 @bar(i32 noundef %x) #3 !dbg !35 { diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll index cdd365b..c0976de 100644 --- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll +++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll @@ -119,10 +119,10 @@ if.end: ; preds = %if.else, %if.then } ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3 +declare void @llvm.lifetime.start.p0(ptr nocapture) #3 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3 +declare void @llvm.lifetime.end.p0(ptr nocapture) #3 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) declare void @llvm.pseudoprobe(i64, i64, i32, i64) #4 diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll index 20be0c2..0c38d9c 100644 --- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll +++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll @@ -217,10 +217,10 @@ for.end: ; preds = %cleanup, %if.then } ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4 +declare void @llvm.lifetime.start.p0(ptr nocapture) #4 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #4 +declare void @llvm.lifetime.end.p0(ptr nocapture) #4 ; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none) declare void @llvm.dbg.assign(metadata, metadata, metadata, metadata, metadata, metadata) #1 diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll index 4e435f4..dbf3dda 100644 --- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll +++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll @@ -57,10 +57,10 @@ for.body: ; preds = %for.cond } ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr 
captures(none)) #2 +declare void @llvm.lifetime.start.p0(ptr captures(none)) #2 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none)) #2 +declare void @llvm.lifetime.end.p0(ptr captures(none)) #2 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) declare void @llvm.pseudoprobe(i64, i64, i32, i64) #3 diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll index d9db804..e246d26 100644 --- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll +++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll @@ -70,13 +70,13 @@ for.body: ; preds = %for.cond } ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 ; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none) declare void @llvm.dbg.declare(metadata, metadata, metadata) #2 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) declare void @llvm.pseudoprobe(i64, i64, i32, i64) #3 diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll index 6bf09ce..d1c5a9d 100644 --- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll +++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll @@ -175,10 +175,10 @@ for.body: ; preds = %for.cond } ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3 +declare void @llvm.lifetime.start.p0(ptr nocapture) #3 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3 +declare void @llvm.lifetime.end.p0(ptr nocapture) #3 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) declare void @llvm.pseudoprobe(i64, i64, i32, i64) #4 diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll index c839364f..2ed1872 100644 --- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll +++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll @@ -85,10 +85,10 @@ for.body: ; preds = %for.cond } ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2 +declare void 
@llvm.lifetime.end.p0(ptr nocapture) #2 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) declare void @llvm.pseudoprobe(i64, i64, i32, i64) #3 diff --git a/llvm/test/Transforms/SampleProfile/remarks.ll b/llvm/test/Transforms/SampleProfile/remarks.ll index 9c0143a..3cb91b7 100644 --- a/llvm/test/Transforms/SampleProfile/remarks.ll +++ b/llvm/test/Transforms/SampleProfile/remarks.ll @@ -121,10 +121,10 @@ define i64 @_Z3foov() #0 !dbg !4 { entry: %sum = alloca i64, align 8 %i = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 8, ptr %sum) #4, !dbg !19 + call void @llvm.lifetime.start.p0(ptr %sum) #4, !dbg !19 call void @llvm.dbg.declare(metadata ptr %sum, metadata !9, metadata !20), !dbg !21 store i64 0, ptr %sum, align 8, !dbg !21, !tbaa !22 - call void @llvm.lifetime.start.p0(i64 4, ptr %i) #4, !dbg !26 + call void @llvm.lifetime.start.p0(ptr %i) #4, !dbg !26 call void @llvm.dbg.declare(metadata ptr %i, metadata !10, metadata !20), !dbg !27 store i32 0, ptr %i, align 4, !dbg !27, !tbaa !28 br label %for.cond, !dbg !26 @@ -135,7 +135,7 @@ for.cond: ; preds = %for.inc, %entry br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !35 for.cond.cleanup: ; preds = %for.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %i) #4, !dbg !36 + call void @llvm.lifetime.end.p0(ptr %i) #4, !dbg !36 br label %for.end for.body: ; preds = %for.cond @@ -173,12 +173,12 @@ for.inc: ; preds = %if.end for.end: ; preds = %for.cond.cleanup %7 = load i64, ptr %sum, align 8, !dbg !53, !tbaa !22 - call void @llvm.lifetime.end.p0(i64 8, ptr %sum) #4, !dbg !54 + call void @llvm.lifetime.end.p0(ptr %sum) #4, !dbg !54 ret i64 %7, !dbg !55 } ; Function Attrs: nounwind argmemonly -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 ; Function Attrs: nounwind readnone declare void @llvm.dbg.declare(metadata, metadata, metadata) #2 @@ -189,7 +189,7 @@ define i32 @rand() #3 !dbg !59 { } ; Function Attrs: nounwind argmemonly -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 ; Function Attrs: nounwind uwtable define i32 @main() #0 !dbg !13 { diff --git a/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll b/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll index 58ca8df..99e908e 100644 --- a/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll +++ b/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll @@ -56,7 +56,7 @@ while.end: ret void } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare i32 @_ZNK1F5beginEv(ptr) @@ -68,7 +68,7 @@ declare noalias nonnull ptr @_Znwm(i64) declare void @_ZN1B6appendEv(ptr) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare i1 @llvm.type.test(ptr, metadata) diff --git a/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll b/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll index 162a3ab..1499eecb 100644 --- a/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll +++ b/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll @@ -437,7 +437,7 @@ define i32 @f9() personality ptr @__CxxFrameHandler3 { ; CHECK-LABEL: @f9( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[S:%.*]] = alloca i8, align 1 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[S]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S]]) ; CHECK-NEXT: 
invoke void @"\01??1S2@@QEAA@XZ"(ptr [[S]]) ; CHECK-NEXT: to label [[TRY_CONT:%.*]] unwind label [[CATCH_DISPATCH:%.*]] ; CHECK: catch.dispatch: @@ -450,13 +450,13 @@ define i32 @f9() personality ptr @__CxxFrameHandler3 { ; entry: %s = alloca i8, align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %s) + call void @llvm.lifetime.start.p0(ptr nonnull %s) invoke void @"\01??1S2@@QEAA@XZ"(ptr %s) to label %try.cont unwind label %ehcleanup ehcleanup: %cleanup.pad = cleanuppad within none [] - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %s) + call void @llvm.lifetime.end.p0(ptr nonnull %s) cleanupret from %cleanup.pad unwind label %catch.dispatch catch.dispatch: @@ -534,7 +534,7 @@ invoke.cont2: ; preds = %invoke.cont ehcleanup: ; preds = %invoke.cont, %entry %0 = cleanuppad within none [] - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) cleanupret from %0 unwind label %catch.dispatch catch.dispatch: ; preds = %ehcleanup, %invoke.cont @@ -556,8 +556,8 @@ declare void @use_x(i32 %x) declare i32 @__CxxFrameHandler3(...) -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) ;. ; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) } ;. diff --git a/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll b/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll index a937d9c..ce58e936 100644 --- a/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll +++ b/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll @@ -79,10 +79,10 @@ for.body: ; preds = %for.cond declare i32 @c(...) 
#0 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 attributes #0 = { "use-soft-float"="false" } attributes #1 = { "target-cpu"="x86-64" } diff --git a/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll b/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll index 62351d7..6129e3b 100644 --- a/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll +++ b/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll @@ -1338,10 +1338,10 @@ define i32 @test_not_sink_lifetime_marker(i1 zeroext %flag, i32 %x) { ; CHECK-NEXT: [[Z:%.*]] = alloca i32, align 4 ; CHECK-NEXT: br i1 [[FLAG:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] ; CHECK: if.then: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[Y]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Y]]) ; CHECK-NEXT: br label [[IF_END:%.*]] ; CHECK: if.else: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[Z]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Z]]) ; CHECK-NEXT: br label [[IF_END]] ; CHECK: if.end: ; CHECK-NEXT: ret i32 1 @@ -1352,11 +1352,11 @@ entry: br i1 %flag, label %if.then, label %if.else if.then: - call void @llvm.lifetime.end.p0(i64 4, ptr %y) + call void @llvm.lifetime.end.p0(ptr %y) br label %if.end if.else: - call void @llvm.lifetime.end.p0(i64 4, ptr %z) + call void @llvm.lifetime.end.p0(ptr %z) br label %if.end if.end: @@ -1468,8 +1468,8 @@ declare void @direct_callee() declare void @direct_callee2() declare void @direct_callee3() -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define void @creating_too_many_phis(i1 %cond, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) { ; CHECK-LABEL: @creating_too_many_phis( diff --git a/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll b/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll index a430399..307501d 100644 --- a/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll +++ b/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll @@ -39,8 +39,8 @@ declare void @f0() declare void @f1() declare void @f2() -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define void @_Z4loopi(i1 %cmp) { ; HOIST-LABEL: @_Z4loopi( diff --git a/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll b/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll index ea14b17..40e9a49 100644 --- a/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll +++ b/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals ; RUN: opt < %s -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 -S | FileCheck %s -declare void @llvm.lifetime.start.p0(i64, ptr) -declare void @llvm.lifetime.end.p0(i64, ptr) +declare void @llvm.lifetime.start.p0(ptr) +declare void @llvm.lifetime.end.p0(ptr) declare void @escape(ptr) @@ -15,16 +15,16 @@ define void @caller(i1 %c) personality ptr 
@__gxx_personality_v0 { ; CHECK-LABEL: @caller( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[I0:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I0]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I0]]) ; CHECK-NEXT: call void @escape(ptr [[I0]]) ; CHECK-NEXT: [[I2:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I2]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I2]]) ; CHECK-NEXT: call void @escape(ptr [[I2]]) ; CHECK-NEXT: [[I4:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I4]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I4]]) ; CHECK-NEXT: call void @escape(ptr [[I4]]) ; CHECK-NEXT: [[I6:%.*]] = alloca i32, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I6]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I6]]) ; CHECK-NEXT: call void @escape(ptr [[I6]]) ; CHECK-NEXT: br i1 [[C:%.*]], label [[V0:%.*]], label [[V1:%.*]] ; CHECK: v0: @@ -36,19 +36,19 @@ define void @caller(i1 %c) personality ptr @__gxx_personality_v0 { ; entry: %i0 = alloca i32 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i0) + call void @llvm.lifetime.start.p0(ptr nonnull %i0) call void @escape(ptr %i0) %i2 = alloca i32 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i2) + call void @llvm.lifetime.start.p0(ptr nonnull %i2) call void @escape(ptr %i2) %i4 = alloca i32 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i4) + call void @llvm.lifetime.start.p0(ptr nonnull %i4) call void @escape(ptr %i4) %i6 = alloca i32 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i6) + call void @llvm.lifetime.start.p0(ptr nonnull %i6) call void @escape(ptr %i6) br i1 %c, label %v0, label %v1 @@ -66,14 +66,14 @@ invoke.cont: lpad.v0: %i8 = landingpad { ptr, i32 } cleanup - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i0) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i4) + call void @llvm.lifetime.end.p0(ptr nonnull %i0) + call void @llvm.lifetime.end.p0(ptr nonnull %i4) br label %end lpad.v1: %i9 = landingpad { ptr, i32 } cleanup - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i2) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i6) + call void @llvm.lifetime.end.p0(ptr nonnull %i2) + call void @llvm.lifetime.end.p0(ptr nonnull %i6) br label %end end: diff --git a/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll b/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll index 0174eb1..88395a0 100644 --- a/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll +++ b/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll @@ -5,32 +5,32 @@ define void @foo() personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: @foo( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[A]]) #[[ATTR1:[0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]]) #[[ATTR1:[0-9]+]] ; CHECK-NEXT: call void @bar() -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[A]]) #[[ATTR1]] +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]]) #[[ATTR1]] ; CHECK-NEXT: ret void ; entry: %a = alloca i8 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %a) nounwind + call void @llvm.lifetime.start.p0(ptr nonnull %a) nounwind invoke void @bar() to label %invoke.cont unwind label %lpad invoke.cont: - call void @llvm.lifetime.end.p0(i64 1, ptr 
nonnull %a) nounwind + call void @llvm.lifetime.end.p0(ptr nonnull %a) nounwind ret void lpad: %b = landingpad { ptr, i32 } cleanup - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %a) nounwind + call void @llvm.lifetime.end.p0(ptr nonnull %a) nounwind resume { ptr, i32 } %b } declare void @bar() -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) nounwind +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) nounwind +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind declare i32 @__gxx_personality_v0(...) ;. diff --git a/llvm/test/Transforms/SimplifyCFG/lifetime.ll b/llvm/test/Transforms/SimplifyCFG/lifetime.ll index d6bba2c..fac0b61 100644 --- a/llvm/test/Transforms/SimplifyCFG/lifetime.ll +++ b/llvm/test/Transforms/SimplifyCFG/lifetime.ll @@ -10,11 +10,11 @@ define void @foo(i1 %x) { entry: %a = alloca i8 - call void @llvm.lifetime.start.p0(i64 -1, ptr %a) nounwind + call void @llvm.lifetime.start.p0(ptr %a) nounwind br i1 %x, label %bb0, label %bb1 bb0: - call void @llvm.lifetime.end.p0(i64 -1, ptr %a) nounwind + call void @llvm.lifetime.end.p0(ptr %a) nounwind br label %bb1 bb1: @@ -24,6 +24,6 @@ bb1: declare void @f() -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind +declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind +declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind diff --git a/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll b/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll index 55f1c01..19e1c73 100644 --- a/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll +++ b/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll @@ -22,7 +22,7 @@ define dso_local void @_Z6test01v() addrspace(1) #0 { ; CHECK: do.body: ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @C, align 4, !tbaa [[TBAA2:![0-9]+]] ; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1 -; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr [[J]]) #[[ATTR2:[0-9]+]] +; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(ptr [[J]]) #[[ATTR2:[0-9]+]] ; CHECK-NEXT: store i32 0, ptr [[J]], align 4, !tbaa [[TBAA2]] ; CHECK-NEXT: br label [[FOR_COND:%.*]] ; CHECK: for.cond: @@ -30,11 +30,11 @@ define dso_local void @_Z6test01v() addrspace(1) #0 { ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 3 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr [[J]]) #[[ATTR2]] +; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.end.p0(ptr [[J]]) #[[ATTR2]] ; CHECK-NEXT: br label [[DO_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: for.body: ; CHECK-NEXT: store i32 undef, ptr [[I]], align 4 -; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]] +; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]] ; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !tbaa [[TBAA2]] ; CHECK-NEXT: br label [[FOR_COND1:%.*]] ; CHECK: for.cond1: @@ -43,7 +43,7 @@ define dso_local void @_Z6test01v() addrspace(1) #0 { ; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP2]], [[TMP3]] ; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4:%.*]], label [[FOR_COND_CLEANUP3:%.*]] ; CHECK: for.cond.cleanup3: -; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]] +; CHECK-NEXT: call addrspace(1) 
void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]] ; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[J]], align 4, !tbaa [[TBAA2]] ; CHECK-NEXT: [[INC7:%.*]] = add nsw i32 [[TMP4]], 1 ; CHECK-NEXT: store i32 [[INC7]], ptr [[J]], align 4, !tbaa [[TBAA2]] @@ -64,7 +64,7 @@ entry: do.body: ; preds = %do.cond, %entry %0 = load i32, ptr @C, align 4, !tbaa !2 %inc = add nsw i32 %0, 1 - call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr %j) #2 + call addrspace(1) void @llvm.lifetime.start.p0(ptr %j) #2 store i32 0, ptr %j, align 4, !tbaa !2 br label %for.cond @@ -74,12 +74,12 @@ for.cond: ; preds = %for.inc6, %do.body br i1 %cmp, label %for.body, label %for.cond.cleanup for.cond.cleanup: ; preds = %for.cond - call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr %j) #2 + call addrspace(1) void @llvm.lifetime.end.p0(ptr %j) #2 br label %for.end8 for.body: ; preds = %for.cond store i32 undef, ptr %i, align 4 - call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr %i) #2 + call addrspace(1) void @llvm.lifetime.start.p0(ptr %i) #2 store i32 0, ptr %i, align 4, !tbaa !2 br label %for.cond1 @@ -90,7 +90,7 @@ for.cond1: ; preds = %for.inc, %for.body br i1 %cmp2, label %for.body4, label %for.cond.cleanup3 for.cond.cleanup3: ; preds = %for.cond1 - call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr %i) #2 + call addrspace(1) void @llvm.lifetime.end.p0(ptr %i) #2 br label %for.end for.body4: ; preds = %for.cond1 @@ -124,10 +124,10 @@ do.end: ; preds = %do.cond } ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) addrspace(1) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) addrspace(1) #1 ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) addrspace(1) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) addrspace(1) #1 attributes #0 = { nounwind "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { argmemonly nofree nosync nounwind willreturn } diff --git a/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll b/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll index 87d6493..0014b91 100644 --- a/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll +++ b/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll @@ -8,16 +8,16 @@ define void @pr104567(i8 %x, ptr %f) { ; CHECK-SAME: i8 [[X:%.*]], ptr [[F:%.*]]) { ; CHECK-NEXT: [[START:.*:]] ; CHECK-NEXT: [[Y:%.*]] = alloca [1 x i8], align 1 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[Y]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[Y]]) ; CHECK-NEXT: [[SWITCH_OFFSET:%.*]] = add nsw i8 [[X]], 4 ; CHECK-NEXT: store i8 [[SWITCH_OFFSET]], ptr [[Y]], align 1 ; CHECK-NEXT: call void [[F]](ptr [[Y]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[Y]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[Y]]) ; CHECK-NEXT: ret void ; start: %y = alloca [1 x i8], align 1 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %y) + call void @llvm.lifetime.start.p0(ptr nonnull %y) switch i8 %x, label %default.unreachable [ i8 0, label %bb4 i8 1, label %bb3 @@ -41,7 +41,7 @@ bb2: bb5: call void %f(ptr %y) - call 
void @llvm.lifetime.end.p0(i64 1, ptr nonnull %y) + call void @llvm.lifetime.end.p0(ptr nonnull %y) ret void } diff --git a/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll b/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll index 10e4870..77ce730 100644 --- a/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll +++ b/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll @@ -315,8 +315,8 @@ cont3: ; from sharing stack slots for x and y. declare void @escape_i32_ptr(ptr) -declare void @llvm.lifetime.start(i64, ptr nocapture) -declare void @llvm.lifetime.end(i64, ptr nocapture) +declare void @llvm.lifetime.start(ptr nocapture) +declare void @llvm.lifetime.end(ptr nocapture) define void @dont_merge_lifetimes(i32 %c1, i32 %c2) { ; CHECK-LABEL: @dont_merge_lifetimes( @@ -328,7 +328,7 @@ define void @dont_merge_lifetimes(i32 %c1, i32 %c2) { ; CHECK-NEXT: i32 42, label [[IF_THEN3:%.*]] ; CHECK-NEXT: ] ; CHECK: if.then: -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: store i32 0, ptr [[X]], align 4 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[C2:%.*]], 0 ; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN1:%.*]] @@ -336,11 +336,11 @@ define void @dont_merge_lifetimes(i32 %c1, i32 %c2) { ; CHECK-NEXT: call void @escape_i32_ptr(ptr nonnull [[X]]) ; CHECK-NEXT: br label [[IF_END]] ; CHECK: if.end: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: call void @abort() ; CHECK-NEXT: unreachable ; CHECK: if.then3: -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[Y]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[Y]]) ; CHECK-NEXT: store i32 0, ptr [[Y]], align 4 ; CHECK-NEXT: [[TOBOOL5:%.*]] = icmp eq i32 [[C2]], 0 ; CHECK-NEXT: br i1 [[TOBOOL5]], label [[IF_END7:%.*]], label [[IF_THEN6:%.*]] @@ -348,7 +348,7 @@ define void @dont_merge_lifetimes(i32 %c1, i32 %c2) { ; CHECK-NEXT: call void @escape_i32_ptr(ptr nonnull [[Y]]) ; CHECK-NEXT: br label [[IF_END7]] ; CHECK: if.end7: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[Y]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[Y]]) ; CHECK-NEXT: call void @abort() ; CHECK-NEXT: unreachable ; CHECK: if.end9: @@ -363,7 +363,7 @@ entry: ] if.then: ; preds = %entry - call void @llvm.lifetime.start(i64 4, ptr nonnull %x) + call void @llvm.lifetime.start(ptr nonnull %x) store i32 0, ptr %x, align 4 %tobool = icmp eq i32 %c2, 0 br i1 %tobool, label %if.end, label %if.then1 @@ -373,12 +373,12 @@ if.then1: ; preds = %if.then br label %if.end if.end: ; preds = %if.then1, %if.then - call void @llvm.lifetime.end(i64 4, ptr nonnull %x) + call void @llvm.lifetime.end(ptr nonnull %x) call void @abort() unreachable if.then3: ; preds = %entry - call void @llvm.lifetime.start(i64 4, ptr nonnull %y) + call void @llvm.lifetime.start(ptr nonnull %y) store i32 0, ptr %y, align 4 %tobool5 = icmp eq i32 %c2, 0 br i1 %tobool5, label %if.end7, label %if.then6 @@ -388,7 +388,7 @@ if.then6: ; preds = %if.then3 br label %if.end7 if.end7: ; preds = %if.then6, %if.then3 - call void @llvm.lifetime.end(i64 4, ptr nonnull %y) + call void @llvm.lifetime.end(ptr nonnull %y) call void @abort() unreachable diff --git a/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll b/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll index 325db79..fa771ad 100644 --- 
a/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll +++ b/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll @@ -44,17 +44,17 @@ define dso_local void @_Z7dostuff1AS_i(ptr nocapture byval(%struct.A) align 8 %a ; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [[STRUCT_A]], ptr [[B]], i64 0, i32 0, i64 5 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARRAYIDX4]], align 8 ; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str, i64 [[INC]], i64 [[TMP1]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 80, ptr nonnull [[AGG_TMP]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) [[AGG_TMP]], ptr nonnull align 8 dereferenceable(80) [[B]], i64 80, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 80, ptr nonnull [[AGG_TMP5]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP5]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) [[AGG_TMP5]], ptr nonnull align 8 dereferenceable(80) [[A]], i64 80, i1 false) ; CHECK-NEXT: [[ADD]] = add nsw i32 [[I_TR]], 1 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP1]], ptr align 8 [[AGG_TMP]], i64 80, i1 false) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP52]], ptr align 8 [[AGG_TMP5]], i64 80, i1 false) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[A]], ptr align 8 [[AGG_TMP1]], i64 80, i1 false) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[B]], ptr align 8 [[AGG_TMP52]], i64 80, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 80, ptr nonnull [[AGG_TMP]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 80, ptr nonnull [[AGG_TMP5]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP5]]) ; CHECK-NEXT: br label [[TAILRECURSE]] ; CHECK: return: ; CHECK-NEXT: ret void @@ -74,14 +74,14 @@ if.end: ; preds = %entry %1 = load i64, ptr %arrayidx4, align 8 %call = call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str , i64 %inc, i64 %1) - call void @llvm.lifetime.start.p0(i64 80, ptr nonnull %agg.tmp) + call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp) call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) %agg.tmp, ptr nonnull align 8 dereferenceable(80) %b, i64 80, i1 false) - call void @llvm.lifetime.start.p0(i64 80, ptr nonnull %agg.tmp5) + call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp5) call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) %agg.tmp5, ptr nonnull align 8 dereferenceable(80) %a, i64 80, i1 false) %add = add nsw i32 %i, 1 call void @_Z7dostuff1AS_i(ptr nonnull byval(%struct.A) align 8 %agg.tmp, ptr nonnull byval(%struct.A) align 8 %agg.tmp5, i32 %add) - call void @llvm.lifetime.end.p0(i64 80, ptr nonnull %agg.tmp) - call void @llvm.lifetime.end.p0(i64 80, ptr nonnull %agg.tmp5) + call void @llvm.lifetime.end.p0(ptr nonnull %agg.tmp) + call void @llvm.lifetime.end.p0(ptr nonnull %agg.tmp5) br label %return return: ; preds = %entry, %if.end @@ -95,10 +95,10 @@ declare dso_local noundef i32 @printf(ptr nocapture noundef readonly, ...) 
local declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2 ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 ; Function Attrs: noinline norecurse nounwind optnone uwtable define dso_local i32 @main() local_unnamed_addr #3 { diff --git a/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll b/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll index 256fb04..dedd081 100644 --- a/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll +++ b/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll @@ -44,14 +44,14 @@ define dso_local i32 @_Z3fooi1S(i32 %count, ptr nocapture readonly byval(%struct ; CHECK: if.end: ; CHECK-NEXT: [[ADD]] = add nsw i32 [[COUNT_TR]], 1 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) [[AGG_TMP1]], ptr nonnull align 8 dereferenceable(20) [[P1]], i64 20, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr nonnull [[AGG_TMP14]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr nonnull [[AGG_TMP_I]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP14]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP_I]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) [[AGG_TMP14]], ptr nonnull align 8 dereferenceable(20) [[AGG_TMP1]], i64 20, i1 false) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) [[AGG_TMP_I]], ptr nonnull align 8 dereferenceable(20) [[AGG_TMP14]], i64 20, i1 false) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP_I1]], ptr align 8 [[AGG_TMP_I]], i64 20, i1 false) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[P1]], ptr align 8 [[AGG_TMP_I1]], i64 20, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr nonnull [[AGG_TMP14]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr nonnull [[AGG_TMP_I]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP14]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP_I]]) ; CHECK-NEXT: br label [[TAILRECURSE]] ; CHECK: return: ; CHECK-NEXT: ret i32 [[CALL]] @@ -72,13 +72,13 @@ if.then: ; preds = %entry if.end: ; preds = %entry %add = add nsw i32 %count, 1 call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) %agg.tmp1, ptr nonnull align 8 dereferenceable(20) %p1, i64 20, i1 false) - call void @llvm.lifetime.start.p0(i64 20, ptr nonnull %agg.tmp14) - call void @llvm.lifetime.start.p0(i64 20, ptr nonnull %agg.tmp.i) + call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp14) + call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp.i) call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) %agg.tmp14, ptr nonnull align 8 dereferenceable(20) %agg.tmp1, i64 20, i1 false) call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) %agg.tmp.i, ptr nonnull align 8 dereferenceable(20) %agg.tmp14, i64 20, i1 false) %call.i = call i32 @_Z3fooi1S(i32 %add, ptr nonnull byval(%struct.S) align 8 %agg.tmp.i) - call void @llvm.lifetime.end.p0(i64 20, ptr nonnull %agg.tmp14) - call void @llvm.lifetime.end.p0(i64 20, ptr nonnull %agg.tmp.i) + call void 
@llvm.lifetime.end.p0(ptr nonnull %agg.tmp14) + call void @llvm.lifetime.end.p0(ptr nonnull %agg.tmp.i) br label %return return: ; preds = %if.end, %if.then @@ -89,10 +89,10 @@ return: ; preds = %if.end, %if.then declare dso_local i32 @_Z3zoo1S(ptr byval(%struct.S) align 8) local_unnamed_addr #1 ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 ; Function Attrs: argmemonly nounwind willreturn declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2 diff --git a/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll b/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll index 293deca..b77ae9c 100644 --- a/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll +++ b/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll @@ -49,11 +49,11 @@ define dso_local void @_Z19test_multiple_exitsi(i32 %param) local_unnamed_addr # ; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[PARAM_TR]], 10 ; CHECK-NEXT: br i1 [[TMP0]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] ; CHECK: if.then: -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[TEMP]]) #1 +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]]) #1 ; CHECK-NEXT: call void @_Z11capture_argPi(ptr nonnull [[TEMP]]) ; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[PARAM_TR]], 1 ; CHECK-NEXT: call void @_Z19test_multiple_exitsi(i32 [[ADD]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[TEMP]]) #1 +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]]) #1 ; CHECK-NEXT: br label [[IF_END14:%.*]] ; CHECK: if.else: ; CHECK-NEXT: [[PARAM_OFF:%.*]] = add i32 [[PARAM_TR]], -10 @@ -80,11 +80,11 @@ entry: br i1 %0, label %if.then, label %if.else if.then: ; preds = %entry - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %temp) #2 + call void @llvm.lifetime.start.p0(ptr nonnull %temp) #2 call void @_Z11capture_argPi(ptr nonnull %temp) %add = add nuw nsw i32 %param, 1 call void @_Z19test_multiple_exitsi(i32 %add) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %temp) #2 + call void @llvm.lifetime.end.p0(ptr nonnull %temp) #2 br label %if.end14 if.else: ; preds = %entry @@ -113,10 +113,10 @@ if.end14: ; preds = %if.then5, %if.then1 } ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 attributes #0 = { nofree noinline norecurse nounwind uwtable } attributes #1 = { nounwind uwtable } diff --git a/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll b/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll index c9ac9a5d..2f1aded 100644 --- a/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll +++ b/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll @@ -34,11 +34,11 @@ define dso_local void @_Z4testi(i32 %recurseCount) local_unnamed_addr #1 { ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[RECURSECOUNT_TR]], 0 ; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[IF_END]] ; CHECK: if.end: -; CHECK-NEXT: 
call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[TEMP]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]]) ; CHECK-NEXT: store i32 10, ptr [[TEMP]], align 4 ; CHECK-NEXT: call void @_Z15globalIncrementPKi(ptr nonnull [[TEMP]]) ; CHECK-NEXT: [[SUB]] = add nsw i32 [[RECURSECOUNT_TR]], -1 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[TEMP]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]]) ; CHECK-NEXT: br label [[TAILRECURSE]] ; CHECK: return: ; CHECK-NEXT: ret void @@ -49,12 +49,12 @@ entry: br i1 %cmp, label %return, label %if.end if.end: ; preds = %entry - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %temp) #6 + call void @llvm.lifetime.start.p0(ptr nonnull %temp) #6 store i32 10, ptr %temp, align 4 call void @_Z15globalIncrementPKi(ptr nonnull %temp) %sub = add nsw i32 %recurseCount, -1 call void @_Z4testi(i32 %sub) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %temp) #6 + call void @llvm.lifetime.end.p0(ptr nonnull %temp) #6 br label %return return: ; preds = %entry, %if.end @@ -62,10 +62,10 @@ return: ; preds = %entry, %if.end } ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.start.p0(ptr nocapture) #2 ; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2 +declare void @llvm.lifetime.end.p0(ptr nocapture) #2 attributes #0 = { nofree noinline norecurse nounwind uwtable } attributes #1 = { nounwind uwtable } diff --git a/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll b/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll index 14bfbb1..36eaf6e 100644 --- a/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll @@ -61,8 +61,8 @@ define i32 @main() { } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/Transforms/Util/dbg-call-bitcast.ll b/llvm/test/Transforms/Util/dbg-call-bitcast.ll index d8d80ab..f0c579c 100644 --- a/llvm/test/Transforms/Util/dbg-call-bitcast.ll +++ b/llvm/test/Transforms/Util/dbg-call-bitcast.ll @@ -2,7 +2,7 @@ define dso_local void @_Z1fv() { %1 = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1) + call void @llvm.lifetime.start.p0(ptr nonnull %1) call void @llvm.dbg.declare(metadata ptr %1, metadata !16, metadata !DIExpression()), !dbg !19 ; CHECK: %[[A:.*]] = alloca i32, align 4 ; CHECK: #dbg_value(ptr %[[A]], {{.*}}, !DIExpression(DW_OP_deref) @@ -11,13 +11,13 @@ define dso_local void @_Z1fv() { ; CHECK-NOT: #dbg_value ; CHECK: call void @_Z1gPv call void @_Z1gPv(ptr nonnull %1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1) + call void @llvm.lifetime.end.p0(ptr nonnull %1) ret void, !dbg !21 } define dso_local void @_Z2fv() { %1 = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1) + call void @llvm.lifetime.start.p0(ptr nonnull %1) call void @llvm.dbg.declare(metadata ptr %1, metadata !16, metadata !DIExpression()), !dbg !19 ; CHECK: %[[A:.*]] = alloca i32, align 4 ; CHECK: #dbg_value(ptr %[[A]], {{.*}}, !DIExpression(DW_OP_deref) @@ -29,14 +29,14 @@ block2: ; CHECK: #dbg_value(ptr %[[A]], {{.*}}, !DIExpression(DW_OP_deref) ; CHECK: call void 
@_Z1gPv call void @_Z1gPv(ptr nonnull %1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1) + call void @llvm.lifetime.end.p0(ptr nonnull %1) ret void, !dbg !21 } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @llvm.dbg.declare(metadata, metadata, metadata) declare dso_local void @_Z1gPv(ptr) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!8, !9, !10} diff --git a/llvm/test/Verifier/intrinsic-immarg.ll b/llvm/test/Verifier/intrinsic-immarg.ll index c1bb932..d5aef3d 100644 --- a/llvm/test/Verifier/intrinsic-immarg.ll +++ b/llvm/test/Verifier/intrinsic-immarg.ll @@ -163,26 +163,6 @@ define void @test_scatter_8i32(<8 x i32> %a1, <8 x ptr> %ptr, <8 x i1> %mask, i3 ret void } -declare void @llvm.lifetime.start.p0(i64, ptr) -define void @test_lifetime_start(i64 %arg0) { - ; CHECK: immarg operand has non-immediate parameter - ; CHECK-NEXT: i64 %arg0 - ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 %arg0, ptr %ptr) - %ptr = alloca i64 - call void @llvm.lifetime.start.p0(i64 %arg0, ptr %ptr) - ret void -} - -declare void @llvm.lifetime.end.p0(i64, ptr) -define void @test_lifetime_end(i64 %arg0) { - ; CHECK: immarg operand has non-immediate parameter - ; CHECK-NEXT: i64 %arg0 - ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 %arg0, ptr %ptr) - %ptr = alloca i64 - call void @llvm.lifetime.end.p0(i64 %arg0, ptr %ptr) - ret void -} - declare ptr @llvm.invariant.start.p0(i64, ptr) define void @test_invariant_start(i64 %arg0, ptr %ptr) { ; CHECK: immarg operand has non-immediate parameter diff --git a/llvm/test/Verifier/opaque-ptr.ll b/llvm/test/Verifier/opaque-ptr.ll index 10e43a4..3ac9044 100644 --- a/llvm/test/Verifier/opaque-ptr.ll +++ b/llvm/test/Verifier/opaque-ptr.ll @@ -40,13 +40,13 @@ define void @atomicrmw(ptr %a, i32 %i) { define void @opaque_mangle() { ; CHECK-LABEL: @opaque_mangle( ; CHECK-NEXT: [[A:%.*]] = alloca i64, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret void ; %a = alloca i64 - call void @llvm.lifetime.start.p0(i64 8, ptr %a) - call void @llvm.lifetime.end.p0(i64 8, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret void } @@ -65,10 +65,8 @@ define void @intrinsic_calls(ptr %a) { ret void } -; CHECK: @llvm.lifetime.start.p0 -; CHECK: @llvm.lifetime.end.p0 -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare <2 x i32> @llvm.masked.load.v2i32.p0(ptr, i32, <2 x i1>, <2 x i32>) declare void @llvm.masked.store.v2i32.p0(<2 x i32>, ptr, i32, <2 x i1>) diff --git a/llvm/test/lit.site.cfg.py.in b/llvm/test/lit.site.cfg.py.in index 893e2cb..973e0ec9 100644 --- a/llvm/test/lit.site.cfg.py.in +++ b/llvm/test/lit.site.cfg.py.in @@ -66,7 +66,6 @@ config.spirv_tools_tests = @LLVM_INCLUDE_SPIRV_TOOLS_TESTS@ config.have_vc_rev = @LLVM_APPEND_VC_REV@ config.force_vc_rev = "@LLVM_FORCE_VC_REVISION@" config.has_logf128 = @LLVM_HAS_LOGF128@ -config.has_key_instructions = @LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS@ import lit.llvm lit.llvm.initialize(lit_config, 
config) diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll index 96ff2d7..ef60118 100644 --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll @@ -14,7 +14,7 @@ entry: #dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17) store ptr %A, ptr %A.addr, align 8, !tbaa !18 #dbg_declare(ptr %A.addr, !13, !DIExpression(), !17) - call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22 + call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22 #dbg_declare(ptr %i, !14, !DIExpression(), !23) store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24 br label %for.cond, !dbg !22 @@ -27,7 +27,7 @@ for.cond: ; preds = %for.inc, %entry br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32 for.cond.cleanup: ; preds = %for.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33 + call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33 br label %for.end for.body: ; preds = %for.cond @@ -49,10 +49,10 @@ for.end: ; preds = %for.cond.cleanup } ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 ; Function Attrs: nounwind uwtable define dso_local void @bar(ptr %A) #0 !dbg !41 { @@ -61,7 +61,7 @@ entry: %i = alloca i32, align 4 store ptr %A, ptr %A.addr, align 8, !tbaa !18 #dbg_declare(ptr %A.addr, !43, !DIExpression(), !46) - call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47 + call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47 #dbg_declare(ptr %i, !44, !DIExpression(), !48) store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24 br label %for.cond, !dbg !47 @@ -74,7 +74,7 @@ for.cond: ; preds = %for.inc, %entry br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54 for.cond.cleanup: ; preds = %for.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55 + call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55 br label %for.end for.body: ; preds = %for.cond diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected index 6504830..4bae52e 100644 --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected @@ -16,7 +16,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 { ; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]]) ; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]] ; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]] ; 
CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]]) ; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]] ; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]] @@ -27,7 +27,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 { ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]] ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]] ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: for.body: ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]] @@ -50,7 +50,7 @@ entry: #dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17) store ptr %A, ptr %A.addr, align 8, !tbaa !18 #dbg_declare(ptr %A.addr, !13, !DIExpression(), !17) - call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22 + call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22 #dbg_declare(ptr %i, !14, !DIExpression(), !23) store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24 br label %for.cond, !dbg !22 @@ -63,7 +63,7 @@ for.cond: ; preds = %for.inc, %entry br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32 for.cond.cleanup: ; preds = %for.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33 + call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33 br label %for.end for.body: ; preds = %for.cond @@ -85,10 +85,10 @@ for.end: ; preds = %for.cond.cleanup } ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 ; Function Attrs: nounwind uwtable define dso_local void @bar(ptr %A) #0 !dbg !41 { @@ -98,7 +98,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 { ; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4 ; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]] ; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]] ; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]]) ; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]] ; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]] @@ -109,7 +109,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 { ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]] ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]] ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: for.body: ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, 
ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]] @@ -131,7 +131,7 @@ entry: %i = alloca i32, align 4 store ptr %A, ptr %A.addr, align 8, !tbaa !18 #dbg_declare(ptr %A.addr, !43, !DIExpression(), !46) - call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47 + call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47 #dbg_declare(ptr %i, !44, !DIExpression(), !48) store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24 br label %for.cond, !dbg !47 @@ -144,7 +144,7 @@ for.cond: ; preds = %for.inc, %entry br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54 for.cond.cleanup: ; preds = %for.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55 + call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55 br label %for.end for.body: ; preds = %for.cond diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected index 7c1ea5e..12c6e4e 100644 --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected @@ -17,7 +17,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 { ; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]]) ; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]] ; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]] ; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]]) ; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]] ; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]] @@ -28,7 +28,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 { ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]] ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]] ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: for.body: ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]] @@ -51,7 +51,7 @@ entry: #dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17) store ptr %A, ptr %A.addr, align 8, !tbaa !18 #dbg_declare(ptr %A.addr, !13, !DIExpression(), !17) - call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22 + call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22 #dbg_declare(ptr %i, !14, !DIExpression(), !23) store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24 br label %for.cond, !dbg !22 @@ -64,7 +64,7 @@ for.cond: ; preds = %for.inc, %entry br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32 for.cond.cleanup: ; preds = %for.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33 + call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33 br label %for.end for.body: ; preds = 
%for.cond
@@ -86,10 +86,10 @@ for.end: ; preds = %for.cond.cleanup
 }
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 ; Function Attrs: nounwind uwtable
 define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -100,7 +100,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
 ; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
 ; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
 ; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
 ; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
 ; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
 ; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -111,7 +111,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
 ; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
 ; CHECK: for.body:
 ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -133,7 +133,7 @@ entry:
 %i = alloca i32, align 4
 store ptr %A, ptr %A.addr, align 8, !tbaa !18
 #dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
 #dbg_declare(ptr %i, !44, !DIExpression(), !48)
 store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
 br label %for.cond, !dbg !47
@@ -146,7 +146,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
 br label %for.end
 for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected
index 94af952..d67a303 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected
@@ -17,7 +17,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
 ; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
 ; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
 ; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
 ; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
 ; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
 ; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -28,7 +28,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
 ; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
 ; CHECK: for.body:
 ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -51,7 +51,7 @@ entry:
 #dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
 store ptr %A, ptr %A.addr, align 8, !tbaa !18
 #dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
 #dbg_declare(ptr %i, !14, !DIExpression(), !23)
 store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
 br label %for.cond, !dbg !22
@@ -64,7 +64,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
 br label %for.end
 for.body: ; preds = %for.cond
@@ -86,10 +86,10 @@ for.end: ; preds = %for.cond.cleanup
 }
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 ; Function Attrs: nounwind uwtable
 define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -100,7 +100,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
 ; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
 ; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
 ; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
 ; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
 ; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
 ; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -111,7 +111,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
 ; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
 ; CHECK: for.body:
 ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -133,7 +133,7 @@ entry:
 %i = alloca i32, align 4
 store ptr %A, ptr %A.addr, align 8, !tbaa !18
 #dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
 #dbg_declare(ptr %i, !44, !DIExpression(), !48)
 store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
 br label %for.cond, !dbg !47
@@ -146,7 +146,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
 br label %for.end
 for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected
index 6504830..4bae52e 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected
@@ -16,7 +16,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
 ; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
 ; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
 ; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
 ; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
 ; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
 ; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -27,7 +27,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
 ; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
 ; CHECK: for.body:
 ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -50,7 +50,7 @@ entry:
 #dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
 store ptr %A, ptr %A.addr, align 8, !tbaa !18
 #dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
 #dbg_declare(ptr %i, !14, !DIExpression(), !23)
 store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
 br label %for.cond, !dbg !22
@@ -63,7 +63,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
 br label %for.end
 for.body: ; preds = %for.cond
@@ -85,10 +85,10 @@ for.end: ; preds = %for.cond.cleanup
 }
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 ; Function Attrs: nounwind uwtable
 define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -98,7 +98,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
 ; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
 ; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
 ; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
 ; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
 ; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
 ; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -109,7 +109,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
 ; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
 ; CHECK: for.body:
 ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -131,7 +131,7 @@ entry:
 %i = alloca i32, align 4
 store ptr %A, ptr %A.addr, align 8, !tbaa !18
 #dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
 #dbg_declare(ptr %i, !44, !DIExpression(), !48)
 store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
 br label %for.cond, !dbg !47
@@ -144,7 +144,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
 br label %for.end
 for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected
index a656c4ae..fb3a76f 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected
@@ -16,7 +16,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
 ; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
 ; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
 ; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
 ; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
 ; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
 ; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -27,7 +27,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
 ; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
 ; CHECK: for.body:
 ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -50,7 +50,7 @@ entry:
 #dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
 store ptr %A, ptr %A.addr, align 8, !tbaa !18
 #dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
 #dbg_declare(ptr %i, !14, !DIExpression(), !23)
 store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
 br label %for.cond, !dbg !22
@@ -63,7 +63,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
 br label %for.end
 for.body: ; preds = %for.cond
@@ -85,10 +85,10 @@ for.end: ; preds = %for.cond.cleanup
 }
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
 ; Function Attrs: nounwind uwtable
 define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -98,7 +98,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
 ; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
 ; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
 ; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
 ; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
 ; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
 ; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -109,7 +109,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
 ; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
 ; CHECK: for.body:
 ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -131,7 +131,7 @@ entry:
 %i = alloca i32, align 4
 store ptr %A, ptr %A.addr, align 8, !tbaa !18
 #dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
 #dbg_declare(ptr %i, !44, !DIExpression(), !48)
 store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
 br label %for.cond, !dbg !47
@@ -144,7 +144,7 @@ for.cond: ; preds = %for.inc, %entry
 br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
 for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
 br label %for.end
 for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/llvm-ir2vec/entities.ll b/llvm/test/tools/llvm-ir2vec/entities.ll
index 737044c..4ed6400 100644
--- a/llvm/test/tools/llvm-ir2vec/entities.ll
+++ b/llvm/test/tools/llvm-ir2vec/entities.ll
@@ -1,6 +1,6 @@
 ; RUN: llvm-ir2vec entities | FileCheck %s
-CHECK: 92
+CHECK: 93
 CHECK-NEXT: Ret 0
 CHECK-NEXT: Br 1
 CHECK-NEXT: Switch 2
@@ -48,48 +48,49 @@ CHECK-NEXT: SIToFP 43
 CHECK-NEXT: FPTrunc 44
 CHECK-NEXT: FPExt 45
 CHECK-NEXT: PtrToInt 46
-CHECK-NEXT: IntToPtr 47
-CHECK-NEXT: BitCast 48
-CHECK-NEXT: AddrSpaceCast 49
-CHECK-NEXT: CleanupPad 50
-CHECK-NEXT: CatchPad 51
-CHECK-NEXT: ICmp 52
-CHECK-NEXT: FCmp 53
-CHECK-NEXT: PHI 54
-CHECK-NEXT: Call 55
-CHECK-NEXT: Select 56
-CHECK-NEXT: UserOp1 57
-CHECK-NEXT: UserOp2 58
-CHECK-NEXT: VAArg 59
-CHECK-NEXT: ExtractElement 60
-CHECK-NEXT: InsertElement 61
-CHECK-NEXT: ShuffleVector 62
-CHECK-NEXT: ExtractValue 63
-CHECK-NEXT: InsertValue 64
-CHECK-NEXT: LandingPad 65
-CHECK-NEXT: Freeze 66
-CHECK-NEXT: FloatTy 67
+CHECK-NEXT: PtrToAddr 47
+CHECK-NEXT: IntToPtr 48
+CHECK-NEXT: BitCast 49
+CHECK-NEXT: AddrSpaceCast 50
+CHECK-NEXT: CleanupPad 51
+CHECK-NEXT: CatchPad 52
+CHECK-NEXT: ICmp 53
+CHECK-NEXT: FCmp 54
+CHECK-NEXT: PHI 55
+CHECK-NEXT: Call 56
+CHECK-NEXT: Select 57
+CHECK-NEXT: UserOp1 58
+CHECK-NEXT: UserOp2 59
+CHECK-NEXT: VAArg 60
+CHECK-NEXT: ExtractElement 61
+CHECK-NEXT: InsertElement 62
+CHECK-NEXT: ShuffleVector 63
+CHECK-NEXT: ExtractValue 64
+CHECK-NEXT: InsertValue 65
+CHECK-NEXT: LandingPad 66
+CHECK-NEXT: Freeze 67
 CHECK-NEXT: FloatTy 68
 CHECK-NEXT: FloatTy 69
 CHECK-NEXT: FloatTy 70
 CHECK-NEXT: FloatTy 71
 CHECK-NEXT: FloatTy 72
 CHECK-NEXT: FloatTy 73
-CHECK-NEXT: VoidTy 74
-CHECK-NEXT: LabelTy 75
-CHECK-NEXT: MetadataTy 76
-CHECK-NEXT: UnknownTy 77
-CHECK-NEXT: TokenTy 78
-CHECK-NEXT: IntegerTy 79
-CHECK-NEXT: FunctionTy 80
-CHECK-NEXT: PointerTy 81
-CHECK-NEXT: StructTy 82
-CHECK-NEXT: ArrayTy 83
-CHECK-NEXT: VectorTy 84
+CHECK-NEXT: FloatTy 74
+CHECK-NEXT: VoidTy 75
+CHECK-NEXT: LabelTy 76
+CHECK-NEXT: MetadataTy 77
+CHECK-NEXT: UnknownTy 78
+CHECK-NEXT: TokenTy 79
+CHECK-NEXT: IntegerTy 80
+CHECK-NEXT: FunctionTy 81
+CHECK-NEXT: PointerTy 82
+CHECK-NEXT: StructTy 83
+CHECK-NEXT: ArrayTy 84
 CHECK-NEXT: VectorTy 85
-CHECK-NEXT: PointerTy 86
-CHECK-NEXT: UnknownTy 87
-CHECK-NEXT: Function 88
-CHECK-NEXT: Pointer 89
-CHECK-NEXT: Constant 90
-CHECK-NEXT: Variable 91
+CHECK-NEXT: VectorTy 86
+CHECK-NEXT: PointerTy 87
+CHECK-NEXT: UnknownTy 88
+CHECK-NEXT: Function 89
+CHECK-NEXT: Pointer 90
+CHECK-NEXT: Constant 91
+CHECK-NEXT: Variable 92
diff --git a/llvm/test/tools/llvm-ir2vec/triplets.ll b/llvm/test/tools/llvm-ir2vec/triplets.ll
index a7fd9e4..6f64bab 100644
--- a/llvm/test/tools/llvm-ir2vec/triplets.ll
+++ b/llvm/test/tools/llvm-ir2vec/triplets.ll
@@ -25,41 +25,41 @@ entry:
 }
 ; TRIPLETS: MAX_RELATION=3
-; TRIPLETS-NEXT: 12 79 0
-; TRIPLETS-NEXT: 12 91 2
-; TRIPLETS-NEXT: 12 91 3
+; TRIPLETS-NEXT: 12 80 0
+; TRIPLETS-NEXT: 12 92 2
+; TRIPLETS-NEXT: 12 92 3
 ; TRIPLETS-NEXT: 12 0 1
-; TRIPLETS-NEXT: 0 74 0
-; TRIPLETS-NEXT: 0 91 2
-; TRIPLETS-NEXT: 16 79 0
-; TRIPLETS-NEXT: 16 91 2
-; TRIPLETS-NEXT: 16 91 3
+; TRIPLETS-NEXT: 0 75 0
+; TRIPLETS-NEXT: 0 92 2
+; TRIPLETS-NEXT: 16 80 0
+; TRIPLETS-NEXT: 16 92 2
+; TRIPLETS-NEXT: 16 92 3
 ; TRIPLETS-NEXT: 16 0 1
-; TRIPLETS-NEXT: 0 74 0
-; TRIPLETS-NEXT: 0 91 2
-; TRIPLETS-NEXT: 30 81 0
-; TRIPLETS-NEXT: 30 90 2
+; TRIPLETS-NEXT: 0 75 0
+; TRIPLETS-NEXT: 0 92 2
+; TRIPLETS-NEXT: 30 82 0
+; TRIPLETS-NEXT: 30 91 2
 ; TRIPLETS-NEXT: 30 30 1
-; TRIPLETS-NEXT: 30 81 0
-; TRIPLETS-NEXT: 30 90 2
+; TRIPLETS-NEXT: 30 82 0
+; TRIPLETS-NEXT: 30 91 2
 ; TRIPLETS-NEXT: 30 32 1
-; TRIPLETS-NEXT: 32 74 0
-; TRIPLETS-NEXT: 32 91 2
-; TRIPLETS-NEXT: 32 89 3
+; TRIPLETS-NEXT: 32 75 0
+; TRIPLETS-NEXT: 32 92 2
+; TRIPLETS-NEXT: 32 90 3
 ; TRIPLETS-NEXT: 32 32 1
-; TRIPLETS-NEXT: 32 74 0
-; TRIPLETS-NEXT: 32 91 2
-; TRIPLETS-NEXT: 32 89 3
+; TRIPLETS-NEXT: 32 75 0
+; TRIPLETS-NEXT: 32 92 2
+; TRIPLETS-NEXT: 32 90 3
 ; TRIPLETS-NEXT: 32 31 1
-; TRIPLETS-NEXT: 31 79 0
-; TRIPLETS-NEXT: 31 89 2
+; TRIPLETS-NEXT: 31 80 0
+; TRIPLETS-NEXT: 31 90 2
 ; TRIPLETS-NEXT: 31 31 1
-; TRIPLETS-NEXT: 31 79 0
-; TRIPLETS-NEXT: 31 89 2
+; TRIPLETS-NEXT: 31 80 0
+; TRIPLETS-NEXT: 31 90 2
 ; TRIPLETS-NEXT: 31 12 1
-; TRIPLETS-NEXT: 12 79 0
-; TRIPLETS-NEXT: 12 91 2
-; TRIPLETS-NEXT: 12 91 3
+; TRIPLETS-NEXT: 12 80 0
+; TRIPLETS-NEXT: 12 92 2
+; TRIPLETS-NEXT: 12 92 3
 ; TRIPLETS-NEXT: 12 0 1
-; TRIPLETS-NEXT: 0 74 0
-; TRIPLETS-NEXT: 0 91 2
+; TRIPLETS-NEXT: 0 75 0
+; TRIPLETS-NEXT: 0 92 2
diff --git a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-arithmetic.s b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-arithmetic.s
index 5cf5ed5..234a3e2 100644
--- a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-arithmetic.s
+++ b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-arithmetic.s
@@ -3002,357 +3002,357 @@ vwsub.wx v8, v16, x30
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
 # CHECK-NEXT: 1 8 4.00 8 SMX60_VIEU[4] VWSUB_VX vwsub.vx v8, v16, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VV vaaddu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VX vaaddu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VV vaadd.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VX vaadd.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VV vasubu.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VX vasubu.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VV vasub.vv v8, v8, v8
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VX vasub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
 # CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMADC_VI vmadc.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
@@ -3882,445 +3882,445 @@ vwsub.wx v8, v16, x30
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
 # CHECK-NEXT: 1 16 4.00 16 SMX60_VIEU[4] VRSUB_VX vrsub.vx v8, v8, t5
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VI vsaddu.vi v8, v8, 12
 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00
U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VI vsaddu.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VI vsaddu.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VI vsaddu.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VI vsaddu.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VI vsaddu.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VI vsaddu.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# 
CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VV vsaddu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, 
zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VX 
vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VX vsaddu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# 
CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VI vsadd.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV 
vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 
1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VV vsadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, 
mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VX vsadd.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 
SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VV vssubu.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# 
CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, 
e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VX vssubu.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 
SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VV vssub.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VX vssub.vx v8, v8, t5 # 
CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] 
VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VX vssub.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VWADDU_WV vwaddu.wv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu @@ -4574,7 +4574,7 @@ vwsub.wx v8, v16, x30 # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] -# CHECK-NEXT: - 1120.00 - - - - 3292.00 - +# CHECK-NEXT: - 1120.00 - - - - 4084.00 - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] Instructions: @@ -5267,11 +5267,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -5279,29 +5279,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, 
mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -5311,11 +5311,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -5323,29 +5323,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # 
CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -5355,11 +5355,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -5367,29 +5367,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vaadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -5399,11 +5399,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - 
vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -5411,29 +5411,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vaadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -5443,11 +5443,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: 
- - - - - - 8.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -5455,29 +5455,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vasubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -5487,11 +5487,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -5499,29 +5499,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - 
- - - - - 1.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vasubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -5531,11 +5531,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -5543,29 +5543,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - 
vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vasub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -5575,11 +5575,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -5587,29 +5587,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # 
CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vasub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 4.00 - vmadc.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -6147,11 +6147,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -6159,29 +6159,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - 
- - 4.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -6191,11 +6191,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -6203,29 +6203,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - 
- - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -6235,11 +6235,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -6247,29 +6247,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - 
vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -6279,11 +6279,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -6291,29 +6291,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -6323,11 +6323,11 @@ vwsub.wx v8, v16, x30 # 
CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -6335,29 +6335,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -6367,11 +6367,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 
- vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -6379,29 +6379,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vsadd.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -6411,11 +6411,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 # 
CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -6423,29 +6423,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssubu.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -6455,11 +6455,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -6467,29 +6467,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# 
CHECK-NEXT: - - - - - - 2.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssubu.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -6499,11 +6499,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -6511,29 +6511,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - 
- - 8.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssub.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -6543,11 +6543,11 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu @@ -6555,29 +6555,29 @@ vwsub.wx v8, v16, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, 
t5 +# CHECK-NEXT: - - - - - - 2.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssub.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 4.00 - vwaddu.wv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu diff --git a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-bitwise.s b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-bitwise.s index 89d3872..5a5f366 100644 --- a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-bitwise.s +++ b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-bitwise.s @@ -2630,269 +2630,269 @@ vssrl.vx v8, v8, x30 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu # CHECK-NEXT: 1 16 4.00 16 SMX60_VIEU[4] VSRL_VX vsrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 
SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VI vssra.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV 
vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# 
CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VV vssra.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 
SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VX vssra.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, 
zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 
1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VI vssrl.vi v8, v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VV 
vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VV vssrl.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# 
CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VX vssrl.vx v8, v8, t5 # CHECK: Resources: # CHECK-NEXT: [0] - SMX60_FP @@ -2906,7 +2906,7 @@ vssrl.vx v8, v8, x30 # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] -# CHECK-NEXT: - 708.00 - - - - 2436.00 - +# CHECK-NEXT: - 708.00 - - - - 3060.00 - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] Instructions: @@ -4069,43 +4069,43 @@ vssrl.vx v8, v8, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 
1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -4113,43 +4113,43 @@ vssrl.vx v8, v8, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, 
m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -4157,43 +4157,43 @@ vssrl.vx v8, v8, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 
1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vssra.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -4201,43 +4201,43 @@ vssrl.vx v8, v8, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# 
CHECK-NEXT: - - - - - - 2.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 2.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vi v8, v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -4245,43 +4245,43 @@ vssrl.vx v8, v8, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssrl.vv 
v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 2.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -4289,40 +4289,40 @@ vssrl.vx v8, v8, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - 
- - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 2.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vssrl.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vssrl.vx v8, v8, t5 diff --git a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-mul-div.s b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-mul-div.s index 572ebf2..a166f15 100644 --- a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-mul-div.s +++ b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-mul-div.s @@ -1906,93 +1906,93 @@ vsmul.vx v8, v8, x30 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu # CHECK-NEXT: 1 8 4.00 8 SMX60_VIEU[4] VWMULSU_VX vwmulsu.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# 
CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA 
VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8 +# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VV vsmul.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VX vsmul.vx v8, 
v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5 +# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VX vsmul.vx v8, v8, t5 # CHECK: Resources: # CHECK-NEXT: [0] - SMX60_FP @@ -2006,7 +2006,7 @@ vsmul.vx v8, v8, x30 # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] -# CHECK-NEXT: - 486.00 - - - - 3748.00 - +# CHECK-NEXT: - 486.00 - - - - 4196.00 - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] Instructions: @@ -2901,43 +2901,43 @@ vsmul.vx v8, v8, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv 
v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 32.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 32.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 32.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 4.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 8.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 16.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8 +# CHECK-NEXT: - - - - - - 32.00 - vsmul.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -2945,40 +2945,40 @@ vsmul.vx v8, v8, x30 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - 
- 8.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 32.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 32.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 32.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 4.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 8.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 16.00 - vsmul.vx v8, v8, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5 +# CHECK-NEXT: - - - - - - 32.00 - vsmul.vx v8, v8, t5 diff --git a/llvm/test/tools/llvm-profdata/merge-traces.proftext b/llvm/test/tools/llvm-profdata/merge-traces.proftext index bcf29ba..3512f33 100644 --- a/llvm/test/tools/llvm-profdata/merge-traces.proftext +++ b/llvm/test/tools/llvm-profdata/merge-traces.proftext @@ -1,24 +1,36 @@ -# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s -o %t.profdata -# RUN: llvm-profdata show --temporal-profile-traces %t.profdata | FileCheck %s --check-prefixes=SAMPLE1,SEEN1 -# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %t.profdata -o %t.profdata -# RUN: llvm-profdata show --temporal-profile-traces %t.profdata | FileCheck %s --check-prefixes=SAMPLE2,SEEN2 -# RUN: 
llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %t.profdata -o %t.profdata
-# RUN: llvm-profdata show --temporal-profile-traces %t.profdata | FileCheck %s --check-prefixes=SAMPLE2,SEEN3
-# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %t.profdata -o %t.profdata
-# RUN: llvm-profdata show --temporal-profile-traces %t.profdata | FileCheck %s --check-prefixes=SAMPLE2,SEEN4
-
-# SEEN1: Temporal Profile Traces (samples=1 seen=1):
-# SEEN2: Temporal Profile Traces (samples=2 seen=2):
-# SEEN3: Temporal Profile Traces (samples=2 seen=3):
-# SEEN4: Temporal Profile Traces (samples=2 seen=4):
-# SAMPLE1: Temporal Profile Trace 0 (weight=1 count=3):
-# SAMPLE1: a
-# SAMPLE1: b
-# SAMPLE1: c
-# SAMPLE2: Temporal Profile Trace 1 (weight=1 count=3):
-# SAMPLE2: a
-# SAMPLE2: b
-# SAMPLE2: c
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s --text | FileCheck %s --check-prefixes=CHECK,SEEN1,SAMPLE1
+
+# Merge %s twice so it has two traces
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %s --text | FileCheck %s --check-prefixes=CHECK,SEEN2,SAMPLE2
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %s -o %t-2.profdata
+
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %s %s --text | FileCheck %s --check-prefixes=CHECK,SEEN3,SAMPLE2
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %t-2.profdata %s --text | FileCheck %s --check-prefixes=CHECK,SEEN3,SAMPLE2
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %t-2.profdata --text | FileCheck %s --check-prefixes=CHECK,SEEN3,SAMPLE2
+
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %s %s %s --text | FileCheck %s --check-prefixes=CHECK,SEEN4,SAMPLE2
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %t-2.profdata %s %s --text | FileCheck %s --check-prefixes=CHECK,SEEN4,SAMPLE2
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %t-2.profdata %t-2.profdata --text | FileCheck %s --check-prefixes=CHECK,SEEN4,SAMPLE2
+
+# Test that we can increase the reservoir size, even if inputs are sampled
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %s %s %s -o %t-4.profdata
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=4 %t-4.profdata %t-4.profdata --text | FileCheck %s --check-prefixes=CHECK,SEEN8,SAMPLE4
+
+# Test that decreasing the reservoir size truncates traces
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=1 %t-4.profdata --text | FileCheck %s --check-prefixes=CHECK,SEEN4,SAMPLE1
+
+# CHECK: :temporal_prof_traces
+# CHECK: # Num Temporal Profile Traces:
+# SAMPLE1: 1
+# SAMPLE2: 2
+# SAMPLE4: 4
+# CHECK: # Temporal Profile Trace Stream Size:
+# SEEN1: 1
+# SEEN2: 2
+# SEEN3: 3
+# SEEN4: 4
+# SEEN8: 8
+# CHECK: a,b,c,

 # Header
 :ir
diff --git a/llvm/test/tools/llvm-profdata/read-traces.proftext b/llvm/test/tools/llvm-profdata/read-traces.proftext
index 87f69fe..5e822a9 100644
--- a/llvm/test/tools/llvm-profdata/read-traces.proftext
+++ b/llvm/test/tools/llvm-profdata/read-traces.proftext
@@ -3,19 +3,16 @@
 # RUN: llvm-profdata merge -text %t.2.profdata -o %t.3.proftext
 # RUN: diff %t.1.proftext %t.3.proftext

-# RUN: llvm-profdata show --temporal-profile-traces %t.1.proftext | FileCheck %s
+# RUN: llvm-profdata merge -text %s | FileCheck %s

-# CHECK: Temporal Profile Traces (samples=3 seen=3):
-# CHECK: Temporal Profile Trace 0 (weight=1 count=3):
-# CHECK: foo
-# CHECK: bar
-# CHECK: goo
-# CHECK: Temporal Profile Trace 1 (weight=3 count=3):
-# CHECK: foo
-# CHECK: goo
-# CHECK: bar
-# CHECK: Temporal Profile Trace 2 (weight=1 count=1):
-# CHECK: goo
+# CHECK: :temporal_prof_traces
+# CHECK: # Num Temporal Profile Traces:
+# CHECK-NEXT: 3
+# CHECK: # Temporal Profile Trace Stream Size:
+# CHECK-NEXT: 3
+# CHECK-DAG: foo,bar,goo,
+# CHECK-DAG: foo,goo,bar,
+# CHECK-DAG: goo,

 # Header
 :ir
diff --git a/llvm/test/tools/llvm-profdata/trace-limit.proftext b/llvm/test/tools/llvm-profdata/trace-limit.proftext
index e246ee8..6b4f974 100644
--- a/llvm/test/tools/llvm-profdata/trace-limit.proftext
+++ b/llvm/test/tools/llvm-profdata/trace-limit.proftext
@@ -11,7 +11,7 @@
 # RUN: llvm-profdata merge --temporal-profile-max-trace-length=1000 %s -o %t.profdata
 # RUN: llvm-profdata show --temporal-profile-traces %t.profdata | FileCheck %s --check-prefixes=CHECK,ALL

-# NONE: Temporal Profile Traces (samples=0
+# NONE: Temporal Profile Traces (samples=0 seen=0):
 # CHECK: Temporal Profile Traces (samples=1 seen=1):
 # SOME: Trace 0 (weight=1 count=2):
 # ALL: Trace 0 (weight=1 count=3):
diff --git a/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll b/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll
index d9ed9df..5db1989 100644
--- a/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll
+++ b/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll
@@ -4,15 +4,15 @@
 ; INTERESTING: store
 ; REDUCED: define void @test(ptr %a) {
 ; REDUCED-NEXT: %a1 = alloca i32
-; REDUCED-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %a1)
+; REDUCED-NEXT: call void @llvm.lifetime.start.p0(ptr %a1)
 ; REDUCED-NEXT: store i32 0, ptr %a
 ; REDUCED-NEXT: store i32 1, ptr %a
-; REDUCED-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %a1)
+; REDUCED-NEXT: call void @llvm.lifetime.end.p0(ptr %a1)
 define void @test() {
   %a = alloca i32
-  call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+  call void @llvm.lifetime.start.p0(ptr %a)
   store i32 0, ptr %a
   store i32 1, ptr %a
-  call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+  call void @llvm.lifetime.end.p0(ptr %a)
   ret void
 }
diff --git a/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll b/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll
index b68f718..75b152f 100644
--- a/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll
+++ b/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll
@@ -69,13 +69,13 @@ define void @alloca_constexpr_elt() {
 }

 ; CHECK-LABEL: @alloca_lifetimes(
-; ZERO: call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
-; ONE: call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
-; POISON: call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
+; ZERO: call void @llvm.lifetime.start.p0(ptr %alloca)
+; ONE: call void @llvm.lifetime.start.p0(ptr %alloca)
+; POISON: call void @llvm.lifetime.start.p0(ptr %alloca)
 define void @alloca_lifetimes() {
   %alloca = alloca i32
-  call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
+  call void @llvm.lifetime.start.p0(ptr %alloca)
   store i32 0, ptr %alloca
-  call void @llvm.lifetime.end.p0(i64 4, ptr %alloca)
+  call void @llvm.lifetime.end.p0(ptr %alloca)
   ret void
 }
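
A note on the SpacemitX60 expectations above: the updated vssra/vssrl and vsmul CHECK lines all follow one pattern. At fractional LMUL (mf8 through mf2) an instruction occupies a single SMX60_VIEU slot; from m1 upward the occupancy shown as SMX60_VIEU[N] doubles with LMUL, and latency grows once the occupancy exceeds the base latency (4 cycles for the saturating shifts, 7 for vsmul). The sketch below is fitted to the CHECK lines in this diff as a reading aid; it is not taken from the scheduling model's TableGen, and the function names are illustrative only.

    #include <algorithm>
    #include <cstdio>

    // Latency and SMX60_VIEU occupancy as they appear in the mca output above.
    struct VCost {
      unsigned Latency;        // "Latency" column
      unsigned ResourceCycles; // the N in SMX60_VIEU[N]
    };

    // Saturating shifts (vssra/vssrl): base latency 4; occupancy is 1 for
    // fractional LMUL and 2*LMUL from m1 ([2]) up to m8 ([16]).
    VCost shiftCost(double LMul) {
      unsigned Res = LMul < 1.0 ? 1 : static_cast<unsigned>(2 * LMul);
      return {std::max(4u, Res), Res};
    }

    // Saturating multiply (vsmul): same shape with a higher base, latency 7
    // and occupancy 4*LMUL from m1 ([4]) up to m8 ([32]).
    VCost smulCost(double LMul) {
      unsigned Res = LMul < 1.0 ? 1 : static_cast<unsigned>(4 * LMul);
      return {std::max(7u, Res), Res};
    }

    int main() {
      for (double LMul : {0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0}) {
        VCost S = shiftCost(LMul), M = smulCost(LMul);
        std::printf("LMUL=%-5g vssra: lat=%2u VIEU[%2u]  vsmul: lat=%2u VIEU[%2u]\n",
                    LMul, S.Latency, S.ResourceCycles, M.Latency, M.ResourceCycles);
      }
    }

Running this reproduces every (latency, SMX60_VIEU[N]) pair in the updated expectations, which also explains the resource-pressure totals rising from 2436.00 to 3060.00 and from 3748.00 to 4196.00: each m1..m8 line now contributes N cycles of pressure instead of 1.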
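
The merge-traces.proftext RUN lines exercise --temporal-profile-trace-reservoir-size, which is why the output distinguishes the number of stored traces from the stream size (the old "samples=2 seen=4" wording, now the "Num Temporal Profile Traces" and "Trace Stream Size" fields). For readers unfamiliar with the mechanism, here is a minimal sketch of classic reservoir sampling (Algorithm R) under that samples/seen split. It ignores trace weights, and the Trace and TraceReservoir names are hypothetical, not LLVM classes; the real merging logic lives in the profile writer.

    #include <cstdint>
    #include <random>
    #include <string>
    #include <utility>
    #include <vector>

    // One temporal profile trace: the order in which functions were called.
    struct Trace {
      std::vector<std::string> FunctionNames;
    };

    // Fixed-capacity reservoir. `Seen` counts every trace ever offered, while
    // at most `Capacity` samples are kept, so merging four copies of a profile
    // with capacity 2 reports 2 samples out of a stream of 4, as tested above.
    class TraceReservoir {
      std::vector<Trace> Samples;
      uint64_t Seen = 0;
      size_t Capacity;
      std::mt19937_64 RNG;

    public:
      explicit TraceReservoir(size_t Capacity, uint64_t Seed = 0)
          : Capacity(Capacity), RNG(Seed) {}

      void add(Trace T) {
        ++Seen;
        if (Samples.size() < Capacity) {
          Samples.push_back(std::move(T));
          return;
        }
        // Replace a random slot with probability Capacity/Seen; this keeps
        // every trace seen so far equally likely to remain in the reservoir.
        std::uniform_int_distribution<uint64_t> Dist(0, Seen - 1);
        uint64_t Slot = Dist(RNG);
        if (Slot < Capacity)
          Samples[Slot] = std::move(T);
      }

      uint64_t seen() const { return Seen; }
      size_t samples() const { return Samples.size(); }
    };

This shape also matches the resizing tests: growing the reservoir (size 2 to 4) simply leaves room for more samples on the next merge, while shrinking it (size 4 to 1) truncates the stored samples, as the SEEN4,SAMPLE1 run expects.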