; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=all -mtriple=aarch64 < %s | FileCheck %s

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"

define void @reduce() {
; CHECK-LABEL: 'reduce'
; CHECK-NEXT: Cost Model: Found costs of 0 for: %V1 = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> undef)
; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2 = call i1 @llvm.vector.reduce.xor.v2i1(<2 x i1> undef)
; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4 = call i1 @llvm.vector.reduce.xor.v4i1(<4 x i1> undef)
; CHECK-NEXT: Cost Model: Found costs of 2 for: %V8 = call i1 @llvm.vector.reduce.xor.v8i1(<8 x i1> undef)
; CHECK-NEXT: Cost Model: Found costs of 2 for: %V16 = call i1 @llvm.vector.reduce.xor.v16i1(<16 x i1> undef)
; CHECK-NEXT: Cost Model: Found costs of 3 for: %V32 = call i1 @llvm.vector.reduce.xor.v32i1(<32 x i1> undef)
; CHECK-NEXT: Cost Model: Found costs of 5 for: %V64 = call i1 @llvm.vector.reduce.xor.v64i1(<64 x i1> undef)
; CHECK-NEXT: Cost Model: Found costs of 9 for: %V128 = call i1 @llvm.vector.reduce.xor.v128i1(<128 x i1> undef)
; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V1i8 = call i8 @llvm.vector.reduce.xor.v1i8(<1 x i8> undef)
; CHECK-NEXT: Cost Model: Found costs of RThru:6 CodeSize:5 Lat:6 SizeLat:6 for: %V3i8 = call i8 @llvm.vector.reduce.xor.v3i8(<3 x i8> undef)
; CHECK-NEXT: Cost Model: Found costs of 4 for: %V4i8 = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> undef)
; CHECK-NEXT: Cost Model: Found costs of 5 for: %V8i8 = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> undef)
; CHECK-NEXT: Cost Model: Found costs of 7 for: %V16i8 = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> undef)
; CHECK-NEXT: Cost Model: Found costs of 8 for: %V32i8 = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> undef)
; CHECK-NEXT: Cost Model: Found costs of 10 for: %V64i8 = call i8 @llvm.vector.reduce.xor.v64i8(<64 x i8> undef)
; CHECK-NEXT: Cost Model: Found costs of 4 for: %V4i16 = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> undef)
; CHECK-NEXT: Cost Model: Found costs of 6 for: %V8i16 = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> undef)
; CHECK-NEXT: Cost Model: Found costs of 7 for: %V16i16 = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> undef)
; CHECK-NEXT: Cost Model: Found costs of 3 for: %V2i32 = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> undef)
; CHECK-NEXT: Cost Model: Found costs of 5 for: %V4i32 = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> undef)
; CHECK-NEXT: Cost Model: Found costs of 6 for: %V8i32 = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> undef)
; CHECK-NEXT: Cost Model: Found costs of 3 for: %V2i64 = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> undef)
; CHECK-NEXT: Cost Model: Found costs of 4 for: %V4i64 = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> undef)
; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
  %V1 = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> undef)
  %V2 = call i1 @llvm.vector.reduce.xor.v2i1(<2 x i1> undef)
  %V4 = call i1 @llvm.vector.reduce.xor.v4i1(<4 x i1> undef)
  %V8 = call i1 @llvm.vector.reduce.xor.v8i1(<8 x i1> undef)
  %V16 = call i1 @llvm.vector.reduce.xor.v16i1(<16 x i1> undef)
  %V32 = call i1 @llvm.vector.reduce.xor.v32i1(<32 x i1> undef)
  %V64 = call i1 @llvm.vector.reduce.xor.v64i1(<64 x i1> undef)
  %V128 = call i1 @llvm.vector.reduce.xor.v128i1(<128 x i1> undef)
  %V1i8 = call i8 @llvm.vector.reduce.xor.v1i8(<1 x i8> undef)
  %V3i8 = call i8 @llvm.vector.reduce.xor.v3i8(<3 x i8> undef)
  %V4i8 = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> undef)
  %V8i8 = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> undef)
  %V16i8 = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> undef)
  %V32i8 = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> undef)
  %V64i8 = call i8 @llvm.vector.reduce.xor.v64i8(<64 x i8> undef)
  %V4i16 = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> undef)
  %V8i16 = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> undef)
  %V16i16 = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> undef)
  %V2i32 = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> undef)
  %V4i32 = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> undef)
  %V8i32 = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> undef)
  %V2i64 = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> undef)
  %V4i64 = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> undef)
  ret void
}

declare i1 @llvm.vector.reduce.xor.v1i1(<1 x i1>)
declare i1 @llvm.vector.reduce.xor.v2i1(<2 x i1>)
declare i1 @llvm.vector.reduce.xor.v4i1(<4 x i1>)
declare i1 @llvm.vector.reduce.xor.v8i1(<8 x i1>)
declare i1 @llvm.vector.reduce.xor.v16i1(<16 x i1>)
declare i1 @llvm.vector.reduce.xor.v32i1(<32 x i1>)
declare i1 @llvm.vector.reduce.xor.v64i1(<64 x i1>)
declare i1 @llvm.vector.reduce.xor.v128i1(<128 x i1>)
declare i8 @llvm.vector.reduce.xor.v1i8(<1 x i8>)
declare i8 @llvm.vector.reduce.xor.v3i8(<3 x i8>)
declare i8 @llvm.vector.reduce.xor.v4i8(<4 x i8>)
declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>)
declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>)
declare i8 @llvm.vector.reduce.xor.v32i8(<32 x i8>)
declare i8 @llvm.vector.reduce.xor.v64i8(<64 x i8>)
declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>)
declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>)
declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>)
declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>)
declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>)
declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>)
declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>)
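
; Note: the CHECK lines above are autogenerated (see the NOTE line). After a
; cost-model change, they can be refreshed by rerunning the script named
; there, e.g. (test path below is illustrative, not confirmed by this file):
;   utils/update_analyze_test_checks.py llvm/test/Analysis/CostModel/AArch64/reduce-xor.ll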