; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt -passes=instcombine -S < %s 2>&1 | FileCheck %s

; Tests that instcombine unpacks loads/stores of structs containing scalable
; vectors into per-element accesses, using @llvm.vscale.i64-based offsets
; (a scalable element's byte size is vscale * min-size, so GEPs are emitted
; as `shl` of the vscale rather than constant offsets).

%struct.test = type { <vscale x 1 x i32>, <vscale x 1 x i32> }

define <vscale x 1 x i32> @load(ptr %x) {
; CHECK-LABEL: define <vscale x 1 x i32> @load
; CHECK-SAME: (ptr [[X:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw i64 [[TMP1]], 2
; CHECK-NEXT:    [[A_ELT1:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[TMP2]]
; CHECK-NEXT:    [[A_UNPACK2:%.*]] = load <vscale x 1 x i32>, ptr [[A_ELT1]], align 4
; CHECK-NEXT:    ret <vscale x 1 x i32> [[A_UNPACK2]]
;
  %a = load %struct.test, ptr %x
  %b = extractvalue %struct.test %a, 1
  ret <vscale x 1 x i32> %b
}

define void @store(ptr %x, <vscale x 1 x i32> %y, <vscale x 1 x i32> %z) {
; CHECK-LABEL: define void @store
; CHECK-SAME: (ptr [[X:%.*]], <vscale x 1 x i32> [[Y:%.*]], <vscale x 1 x i32> [[Z:%.*]]) {
; CHECK-NEXT:    store <vscale x 1 x i32> [[Y]], ptr [[X]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw i64 [[TMP1]], 2
; CHECK-NEXT:    [[X_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[TMP2]]
; CHECK-NEXT:    store <vscale x 1 x i32> [[Z]], ptr [[X_REPACK1]], align 4
; CHECK-NEXT:    ret void
;
  %a = insertvalue %struct.test undef, <vscale x 1 x i32> %y, 0
  %b = insertvalue %struct.test %a, <vscale x 1 x i32> %z, 1
  store %struct.test %b, ptr %x
  ret void
}

define {<vscale x 4 x i32>, <vscale x 4 x i32>} @split_load(ptr %p) nounwind {
; CHECK-LABEL: define { <vscale x 4 x i32>, <vscale x 4 x i32> } @split_load
; CHECK-SAME: (ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[R_UNPACK:%.*]] = load <vscale x 4 x i32>, ptr [[P]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[R_UNPACK]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw i64 [[TMP1]], 4
; CHECK-NEXT:    [[R_ELT1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP2]]
; CHECK-NEXT:    [[R_UNPACK2:%.*]] = load <vscale x 4 x i32>, ptr [[R_ELT1]], align 16
; CHECK-NEXT:    [[R3:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[R_UNPACK2]], 1
; CHECK-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[R3]]
;
entry:
  %r = load {<vscale x 4 x i32>, <vscale x 4 x i32>}, ptr %p
  ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %r
}

define {<vscale x 4 x i32>} @split_load_one(ptr %p) nounwind {
; CHECK-LABEL: define { <vscale x 4 x i32> } @split_load_one
; CHECK-SAME: (ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[R_UNPACK:%.*]] = load <vscale x 4 x i32>, ptr [[P]], align 16
; CHECK-NEXT:    [[R1:%.*]] = insertvalue { <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[R_UNPACK]], 0
; CHECK-NEXT:    ret { <vscale x 4 x i32> } [[R1]]
;
entry:
  %r = load {<vscale x 4 x i32>}, ptr %p
  ret {<vscale x 4 x i32>} %r
}

define void @split_store({<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p) nounwind {
; CHECK-LABEL: define void @split_store
; CHECK-SAME: ({ <vscale x 4 x i32>, <vscale x 4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[X_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 0
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT]], ptr [[P]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 4
; CHECK-NEXT:    [[P_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP1]]
; CHECK-NEXT:    [[X_ELT2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 1
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
; CHECK-NEXT:    ret void
;
entry:
  store {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p
  ret void
}

define void @split_store_one({<vscale x 4 x i32>} %x, ptr %p) nounwind {
; CHECK-LABEL: define void @split_store_one
; CHECK-SAME: ({ <vscale x 4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = extractvalue { <vscale x 4 x i32> } [[X]], 0
; CHECK-NEXT:    store <vscale x 4 x i32> [[TMP0]], ptr [[P]], align 16
; CHECK-NEXT:    ret void
;
entry:
  store {<vscale x 4 x i32>} %x, ptr %p
  ret void
}

; Fixed-width baseline: same-sized element types, so offsets fold to constants
; and the round-tripped load becomes bitcasts of the stored elements.
define {<16 x i8>, <16 x i8>} @check_v16i8_v4i32({<4 x i32>, <4 x i32>} %x, ptr %p) nounwind {
; CHECK-LABEL: define { <16 x i8>, <16 x i8> } @check_v16i8_v4i32
; CHECK-SAME: ({ <4 x i32>, <4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[X_ELT:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[X]], 0
; CHECK-NEXT:    store <4 x i32> [[X_ELT]], ptr [[P]], align 16
; CHECK-NEXT:    [[P_REPACK1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
; CHECK-NEXT:    [[X_ELT2:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[X]], 1
; CHECK-NEXT:    store <4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
; CHECK-NEXT:    [[R_UNPACK_CAST:%.*]] = bitcast <4 x i32> [[X_ELT]] to <16 x i8>
; CHECK-NEXT:    [[TMP0:%.*]] = insertvalue { <16 x i8>, <16 x i8> } poison, <16 x i8> [[R_UNPACK_CAST]], 0
; CHECK-NEXT:    [[R_UNPACK4_CAST:%.*]] = bitcast <4 x i32> [[X_ELT2]] to <16 x i8>
; CHECK-NEXT:    [[R5:%.*]] = insertvalue { <16 x i8>, <16 x i8> } [[TMP0]], <16 x i8> [[R_UNPACK4_CAST]], 1
; CHECK-NEXT:    ret { <16 x i8>, <16 x i8> } [[R5]]
;
entry:
  store {<4 x i32>, <4 x i32>} %x, ptr %p
  %r = load {<16 x i8>, <16 x i8>}, ptr %p
  ret {<16 x i8>, <16 x i8>} %r
}

; Scalable variant of the above: the store/load pair is still split per
; element, but the reload is not folded into bitcasts of the stored values.
define {<vscale x 16 x i8>, <vscale x 16 x i8>} @check_nxv16i8_nxv4i32({<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p) nounwind {
; CHECK-LABEL: define { <vscale x 16 x i8>, <vscale x 16 x i8> } @check_nxv16i8_nxv4i32
; CHECK-SAME: ({ <vscale x 4 x i32>, <vscale x 4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[X_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 0
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT]], ptr [[P]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 4
; CHECK-NEXT:    [[P_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP1]]
; CHECK-NEXT:    [[X_ELT2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 1
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
; CHECK-NEXT:    [[R_UNPACK:%.*]] = load <vscale x 16 x i8>, ptr [[P]], align 16
; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[R_UNPACK]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4
; CHECK-NEXT:    [[R_ELT3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP4]]
; CHECK-NEXT:    [[R_UNPACK4:%.*]] = load <vscale x 16 x i8>, ptr [[R_ELT3]], align 16
; CHECK-NEXT:    [[R5:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[R_UNPACK4]], 1
; CHECK-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R5]]
;
entry:
  store {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p
  %r = load {<vscale x 16 x i8>, <vscale x 16 x i8>}, ptr %p
  ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %r
}

define {<vscale x 16 x i8>, <vscale x 16 x i8>} @alloca_nxv16i8_nxv4i32({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) nounwind {
; CHECK-LABEL: define { <vscale x 16 x i8>, <vscale x 16 x i8> } @alloca_nxv16i8_nxv4i32
; CHECK-SAME: ({ <vscale x 4 x i32>, <vscale x 4 x i32> } [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
; CHECK-NEXT:    [[X_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 0
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT]], ptr [[P]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 4
; CHECK-NEXT:    [[P_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP1]]
; CHECK-NEXT:    [[X_ELT2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 1
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
; CHECK-NEXT:    [[R_UNPACK:%.*]] = load <vscale x 16 x i8>, ptr [[P]], align 16
; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[R_UNPACK]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4
; CHECK-NEXT:    [[R_ELT3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP4]]
; CHECK-NEXT:    [[R_UNPACK4:%.*]] = load <vscale x 16 x i8>, ptr [[R_ELT3]], align 16
; CHECK-NEXT:    [[R5:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[R_UNPACK4]], 1
; CHECK-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R5]]
;
entry:
  %p = alloca {<vscale x 4 x i32>, <vscale x 4 x i32>}
  store {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p
  %r = load {<vscale x 16 x i8>, <vscale x 16 x i8>}, ptr %p
  ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %r
}

; Differently-sized element types: the struct store/load pair is left alone.
define { <16 x i8>, <32 x i8> } @differenttypes({ <4 x i32>, <8 x i32> } %a, ptr %p) {
; CHECK-LABEL: define { <16 x i8>, <32 x i8> } @differenttypes
; CHECK-SAME: ({ <4 x i32>, <8 x i32> } [[A:%.*]], ptr [[P:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    store { <4 x i32>, <8 x i32> } [[A]], ptr [[P]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = load { <16 x i8>, <32 x i8> }, ptr [[P]], align 16
; CHECK-NEXT:    ret { <16 x i8>, <32 x i8> } [[TMP0]]
;
entry:
  store { <4 x i32>, <8 x i32> } %a, ptr %p, align 16
  %2 = load { <16 x i8>, <32 x i8> }, ptr %p, align 16
  ret { <16 x i8>, <32 x i8> } %2
}