Diffstat (limited to 'llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll')
-rw-r--r--   llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll   56
1 file changed, 28 insertions, 28 deletions
diff --git a/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
index a84d666..d1bcad4 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
@@ -24,8 +24,8 @@ loop:
   %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
-  %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+  %a = load <8 x i8>, ptr %ptr1_i, align 1
+  %b = load <8 x i8>, ptr %ptr2_i, align 1
   %vabd = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
   %vabd_ext = zext <8 x i8> %vabd to <8 x i16>
   %acc_next = add <8 x i16> %vabd_ext, %acc_phi
@@ -65,8 +65,8 @@ loop:
   %acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
-  %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
-  %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+  %a = load <4 x i16>, ptr %ptr1_i, align 1
+  %b = load <4 x i16>, ptr %ptr2_i, align 1
   %vabd = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b)
   %vmov = zext <4 x i16> %vabd to <4 x i32>
   %acc_next = add <4 x i32> %vmov, %acc_phi
@@ -116,8 +116,8 @@ loop:
   %acc_phi_lo = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next_lo, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <16 x i8>, <16 x i8>* %ptr1_i, align 1
-  %b = load <16 x i8>, <16 x i8>* %ptr2_i, align 1
+  %a = load <16 x i8>, ptr %ptr1_i, align 1
+  %b = load <16 x i8>, ptr %ptr2_i, align 1
   %a_hi = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %b_hi = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %a_lo = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -160,8 +160,8 @@ loop:
   %acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
-  %a = load <4 x i32>, <4 x i32>* %ptr1_i, align 1
-  %b = load <4 x i32>, <4 x i32>* %ptr2_i, align 1
+  %a = load <4 x i32>, ptr %ptr1_i, align 1
+  %b = load <4 x i32>, ptr %ptr2_i, align 1
   %vabd = tail call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %a, <4 x i32> %b)
   %acc_next = add <4 x i32> %acc_phi, %vabd
   %next_i = add i32 %i, 4
@@ -198,8 +198,8 @@ loop:
   ; Load values from ptr1 and ptr2
   %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
-  %a = load <4 x i32>, <4 x i32>* %ptr1_i, align 1
-  %b = load <4 x i32>, <4 x i32>* %ptr2_i, align 1
+  %a = load <4 x i32>, ptr %ptr1_i, align 1
+  %b = load <4 x i32>, ptr %ptr2_i, align 1
   ; Perform the intrinsic operation
   %vabd = tail call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %a, <4 x i32> %b)
   %acc_next = add <4 x i32> %acc_phi, %vabd
@@ -237,8 +237,8 @@ loop:
   %acc_phi = phi <2 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
-  %a = load <2 x i32>, <2 x i32>* %ptr1_i, align 1
-  %b = load <2 x i32>, <2 x i32>* %ptr2_i, align 1
+  %a = load <2 x i32>, ptr %ptr1_i, align 1
+  %b = load <2 x i32>, ptr %ptr2_i, align 1
   %vabd = tail call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %a, <2 x i32> %b)
   %acc_next = add <2 x i32> %acc_phi, %vabd
   %next_i = add i32 %i, 2
@@ -272,8 +272,8 @@ loop:
   %acc_phi = phi <8 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
-  %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+  %a = load <8 x i8>, ptr %ptr1_i, align 1
+  %b = load <8 x i8>, ptr %ptr2_i, align 1
   %vabd = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
   %acc_next = add <8 x i8> %acc_phi, %vabd
   %next_i = add i32 %i, 8
@@ -307,8 +307,8 @@ loop:
   %acc_phi = phi <16 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <16 x i8>, <16 x i8>* %ptr1_i, align 1
-  %b = load <16 x i8>, <16 x i8>* %ptr2_i, align 1
+  %a = load <16 x i8>, ptr %ptr1_i, align 1
+  %b = load <16 x i8>, ptr %ptr2_i, align 1
   %vabd = tail call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %a, <16 x i8> %b)
   %acc_next = add <16 x i8> %acc_phi, %vabd
   %next_i = add i32 %i, 16
@@ -342,8 +342,8 @@ loop:
   %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
-  %a = load <8 x i16>, <8 x i16>* %ptr1_i, align 1
-  %b = load <8 x i16>, <8 x i16>* %ptr2_i, align 1
+  %a = load <8 x i16>, ptr %ptr1_i, align 1
+  %b = load <8 x i16>, ptr %ptr2_i, align 1
   %vabd = tail call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %a, <8 x i16> %b)
   %acc_next = add <8 x i16> %acc_phi, %vabd
   %next_i = add i32 %i, 8
@@ -377,8 +377,8 @@ loop:
   %acc_phi = phi <8 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
-  %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+  %a = load <8 x i8>, ptr %ptr1_i, align 1
+  %b = load <8 x i8>, ptr %ptr2_i, align 1
   %vabd = tail call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
   %acc_next = add <8 x i8> %acc_phi, %vabd
   %next_i = add i32 %i, 8
@@ -411,8 +411,8 @@ loop:
   %acc_phi = phi <4 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
-  %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
-  %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+  %a = load <4 x i16>, ptr %ptr1_i, align 1
+  %b = load <4 x i16>, ptr %ptr2_i, align 1
   %vabd = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b)
   %acc_next = add <4 x i16> %acc_phi, %vabd
   %next_i = add i32 %i, 4
@@ -445,8 +445,8 @@ loop:
   %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
-  %a = load <8 x i16>, <8 x i16>* %ptr1_i, align 1
-  %b = load <8 x i16>, <8 x i16>* %ptr2_i, align 1
+  %a = load <8 x i16>, ptr %ptr1_i, align 1
+  %b = load <8 x i16>, ptr %ptr2_i, align 1
   %vabd = tail call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %a, <8 x i16> %b)
   %acc_next = add <8 x i16> %acc_phi, %vabd
   %next_i = add i32 %i, 8
@@ -480,8 +480,8 @@ loop:
   %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
-  %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+  %a = load <8 x i8>, ptr %ptr1_i, align 1
+  %b = load <8 x i8>, ptr %ptr2_i, align 1
   %vabd = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
   %vmov = zext <8 x i8> %vabd to <8 x i16>
   %acc_next = add <8 x i16> %vmov, %acc_phi
@@ -516,8 +516,8 @@ loop:
   %acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
-  %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
-  %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+  %a = load <4 x i16>, ptr %ptr1_i, align 1
+  %b = load <4 x i16>, ptr %ptr2_i, align 1
   %vabd = tail call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %a, <4 x i16> %b)
   %vmov = zext <4 x i16> %vabd to <4 x i32>
   %acc_next = add <4 x i32> %vmov, %acc_phi
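
Every hunk applies the same mechanical change: the load instructions are rewritten from the legacy typed-pointer syntax, which repeats the pointee type in the pointer operand, to the opaque-pointer syntax, where the operand is simply typed `ptr`. A minimal self-contained sketch of the pattern (the function name and comments are illustrative, not taken from the patch):

; before (typed pointers):  %a = load <8 x i8>, <8 x i8>* %p, align 1
; after (opaque pointers):  %a = load <8 x i8>, ptr %p, align 1
define <8 x i8> @load_example(ptr %p) {
  %a = load <8 x i8>, ptr %p, align 1
  ret <8 x i8> %a
}

The loaded value type still appears explicitly in the load instruction itself, so no information is lost by dropping the pointee type from the operand.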
