; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mcpu=cortex-a53 | FileCheck %s

; Test for DAGCombiner optimization: fold (xor (smin(x, C), C)) -> select (x < C), xor (x, C), 0
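; The fold follows by case analysis on smin:
;   x <  C: smin(x, C) = x, so xor (smin(x, C), C) = xor (x, C)
;   x >= C: smin(x, C) = C, so xor (smin(x, C), C) = xor (C, C) = 0
; The smax/umin/umax folds below are analogous, with the comparison
; direction (and signedness) adjusted.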
define i64 @test_smin_neg_one(i64 %a) {
; CHECK-LABEL: test_smin_neg_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmn x0, #1
; CHECK-NEXT:    csinv x0, xzr, x0, ge
; CHECK-NEXT:    ret
  %1 = tail call i64 @llvm.smin.i64(i64 %a, i64 -1)
  %retval.0 = xor i64 %1, -1
  ret i64 %retval.0
}

define i64 @test_smin_constant(i64 %a) {
; CHECK-LABEL: test_smin_constant:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor x8, x0, #0x8
; CHECK-NEXT:    cmp x0, #8
; CHECK-NEXT:    csel x0, x8, xzr, lt
; CHECK-NEXT:    ret
  %1 = tail call i64 @llvm.smin.i64(i64 %a, i64 8)
  %retval.0 = xor i64 %1, 8
  ret i64 %retval.0
}

; Test for DAGCombiner optimization: fold (xor (smax(x, C), C)) -> select (x > C), xor (x, C), 0

define i64 @test_smax_neg_one(i64 %a) {
; CHECK-LABEL: test_smax_neg_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn x8, x0
; CHECK-NEXT:    bic x0, x8, x0, asr #63
; CHECK-NEXT:    ret
  %1 = tail call i64 @llvm.smax.i64(i64 %a, i64 -1)
  %retval.0 = xor i64 %1, -1
  ret i64 %retval.0
}

define i64 @test_smax_constant(i64 %a) {
; CHECK-LABEL: test_smax_constant:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor x8, x0, #0x8
; CHECK-NEXT:    cmp x0, #8
; CHECK-NEXT:    csel x0, x8, xzr, gt
; CHECK-NEXT:    ret
  %1 = tail call i64 @llvm.smax.i64(i64 %a, i64 8)
  %retval.0 = xor i64 %1, 8
  ret i64 %retval.0
}

; Test for DAGCombiner optimization: fold (xor (umin(x, C), C)) -> select (x u< C), xor (x, C), 0

; -1 is UINT64_MAX, so umin(x, -1) = x and the whole expression folds to mvn.
define i64 @test_umin_neg_one(i64 %a) {
; CHECK-LABEL: test_umin_neg_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn x0, x0
; CHECK-NEXT:    ret
  %1 = tail call i64 @llvm.umin.i64(i64 %a, i64 -1)
  %retval.0 = xor i64 %1, -1
  ret i64 %retval.0
}

define i64 @test_umin_constant(i64 %a) {
; CHECK-LABEL: test_umin_constant:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor x8, x0, #0x8
; CHECK-NEXT:    cmp x0, #8
; CHECK-NEXT:    csel x0, x8, xzr, lo
; CHECK-NEXT:    ret
  %1 = tail call i64 @llvm.umin.i64(i64 %a, i64 8)
  %retval.0 = xor i64 %1, 8
  ret i64 %retval.0
}

; Test for DAGCombiner optimization: fold (xor (umax(x, C), C)) -> select (x u> C), xor (x, C), 0

; -1 is UINT64_MAX, so umax(x, -1) = -1 and the xor folds to zero.
define i64 @test_umax_neg_one(i64 %a) {
; CHECK-LABEL: test_umax_neg_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x0, xzr
; CHECK-NEXT:    ret
  %1 = tail call i64 @llvm.umax.i64(i64 %a, i64 -1)
  %retval.0 = xor i64 %1, -1
  ret i64 %retval.0
}

define i64 @test_umax_constant(i64 %a) {
; CHECK-LABEL: test_umax_constant:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor x8, x0, #0x8
; CHECK-NEXT:    cmp x0, #8
; CHECK-NEXT:    csel x0, x8, xzr, hi
; CHECK-NEXT:    ret
  %1 = tail call i64 @llvm.umax.i64(i64 %a, i64 8)
  %retval.0 = xor i64 %1, 8
  ret i64 %retval.0
}

; Test vector cases

define <4 x i32> @test_smin_vector_neg_one(<4 x i32> %a) {
; CHECK-LABEL: test_smin_vector_neg_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v1.2d, #0xffffffffffffffff
; CHECK-NEXT:    cmgt v1.4s, v1.4s, v0.4s
; CHECK-NEXT:    bic v0.16b, v1.16b, v0.16b
; CHECK-NEXT:    ret
  %1 = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %a, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
  %retval.0 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %retval.0
}

define <4 x i32> @test_smin_vector_constant(<4 x i32> %a) {
; CHECK-LABEL: test_smin_vector_constant:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v1.4s, #8
; CHECK-NEXT:    smin v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %1 = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %a, <4 x i32> <i32 8, i32 8, i32 8, i32 8>)
  %retval.0 = xor <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
  ret <4 x i32> %retval.0
}

define <4 x i32> @test_smax_vector_neg_one(<4 x i32> %a) {
; CHECK-LABEL: test_smax_vector_neg_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmge v1.4s, v0.4s, #0
; CHECK-NEXT:    bic v0.16b, v1.16b, v0.16b
; CHECK-NEXT:    ret
  %1 = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %a, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
  %retval.0 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %retval.0
}

define <4 x i32> @test_smax_vector_constant(<4 x i32> %a) {
; CHECK-LABEL: test_smax_vector_constant:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v1.4s, #8
; CHECK-NEXT:    smax v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %1 = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %a, <4 x i32> <i32 8, i32 8, i32 8, i32 8>)
  %retval.0 = xor <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
  ret <4 x i32> %retval.0
}

define <4 x i32> @test_umin_vector_neg_one(<4 x i32> %a) {
; CHECK-LABEL: test_umin_vector_neg_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn v0.16b, v0.16b
; CHECK-NEXT:    ret
  %1 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %a, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
  %retval.0 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %retval.0
}

define <4 x i32> @test_umin_vector_constant(<4 x i32> %a) {
; CHECK-LABEL: test_umin_vector_constant:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v1.4s, #8
; CHECK-NEXT:    umin v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %1 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %a, <4 x i32> <i32 8, i32 8, i32 8, i32 8>)
  %retval.0 = xor <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
  ret <4 x i32> %retval.0
}

define <4 x i32> @test_umax_vector_neg_one(<4 x i32> %a) {
; CHECK-LABEL: test_umax_vector_neg_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v0.2d, #0000000000000000
; CHECK-NEXT:    ret
  %1 = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %a, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
  %retval.0 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %retval.0
}

define <4 x i32> @test_umax_vector_constant(<4 x i32> %a) {
; CHECK-LABEL: test_umax_vector_constant:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v1.4s, #8
; CHECK-NEXT:    umax v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %1 = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %a, <4 x i32> <i32 8, i32 8, i32 8, i32 8>)
  %retval.0 = xor <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
  ret <4 x i32> %retval.0
}

declare i64 @llvm.smin.i64(i64, i64)
declare i64 @llvm.smax.i64(i64, i64)
declare i64 @llvm.umin.i64(i64, i64)
declare i64 @llvm.umax.i64(i64, i64)
declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)