; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S |FileCheck %s
; RUN: opt < %s -aa-pipeline=basic-aa -passes=slp-vectorizer -S |FileCheck %s

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"

; Test if SLP can handle GEP expressions.
; The tests perform the following action:
;   x->first  = y->first  + 16
;   x->second = y->second + 16
; Both GEPs use the same constant offset, so SLP can vectorize the two
; load/gep/store chains into <2 x ptr> operations.

define void @foo1 (ptr noalias %x, ptr noalias %y) {
; CHECK-LABEL: @foo1(
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { ptr, ptr }, ptr [[Y:%.*]], i64 0, i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds { ptr, ptr }, ptr [[X:%.*]], i64 0, i32 0
; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x ptr>, ptr [[TMP1]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i32, <2 x ptr> [[TMP4]], <2 x i64> splat (i64 16)
; CHECK-NEXT:    store <2 x ptr> [[TMP5]], ptr [[TMP2]], align 8
; CHECK-NEXT:    ret void
;
  %1 = getelementptr inbounds { ptr, ptr }, ptr %y, i64 0, i32 0
  %2 = load ptr, ptr %1, align 8
  %3 = getelementptr inbounds i32, ptr %2, i64 16
  %4 = getelementptr inbounds { ptr, ptr }, ptr %x, i64 0, i32 0
  store ptr %3, ptr %4, align 8
  %5 = getelementptr inbounds { ptr, ptr }, ptr %y, i64 0, i32 1
  %6 = load ptr, ptr %5, align 8
  %7 = getelementptr inbounds i32, ptr %6, i64 16
  %8 = getelementptr inbounds { ptr, ptr }, ptr %x, i64 0, i32 1
  store ptr %7, ptr %8, align 8
  ret void
}

; Test that we don't vectorize GEP expressions if indexes are not constants.
; We can't produce an efficient code in that case.
; The runtime index %i prevents vectorization: the CHECK lines below assert
; that the scalar load/gep/store chains are left untouched.
define void @foo2 (ptr noalias %x, ptr noalias %y, i32 %i) {
; CHECK-LABEL: @foo2(
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { ptr, ptr }, ptr [[Y:%.*]], i64 0, i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[I:%.*]]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds { ptr, ptr }, ptr [[X:%.*]], i64 0, i32 0
; CHECK-NEXT:    store ptr [[TMP3]], ptr [[TMP4]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds { ptr, ptr }, ptr [[Y]], i64 0, i32 1
; CHECK-NEXT:    [[TMP6:%.*]] = load ptr, ptr [[TMP5]], align 8
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 [[I]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds { ptr, ptr }, ptr [[X]], i64 0, i32 1
; CHECK-NEXT:    store ptr [[TMP7]], ptr [[TMP8]], align 8
; CHECK-NEXT:    ret void
;
  %1 = getelementptr inbounds { ptr, ptr }, ptr %y, i64 0, i32 0
  %2 = load ptr, ptr %1, align 8
  %3 = getelementptr inbounds i32, ptr %2, i32 %i
  %4 = getelementptr inbounds { ptr, ptr }, ptr %x, i64 0, i32 0
  store ptr %3, ptr %4, align 8
  %5 = getelementptr inbounds { ptr, ptr }, ptr %y, i64 0, i32 1
  %6 = load ptr, ptr %5, align 8
  %7 = getelementptr inbounds i32, ptr %6, i32 %i
  %8 = getelementptr inbounds { ptr, ptr }, ptr %x, i64 0, i32 1
  store ptr %7, ptr %8, align 8
  ret void
}