; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=aarch64 | FileCheck %s

; When consuming profile data we sometimes flip a branch to improve runtime
; performance. If we are optimizing for size, we avoid changing the branch to
; improve outlining and ICF.

define i8 @foo_optsize(i32 %v4) optsize {
; CHECK-LABEL: foo_optsize:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cbz wzr, .LBB0_2
; CHECK-NEXT:  .LBB0_1:
; CHECK-NEXT:    mov w0, wzr
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB0_2: // %b1
; CHECK-NEXT:    cbnz w0, .LBB0_4
; CHECK-NEXT:  .LBB0_3: // %b2
; CHECK-NEXT:    mov w0, #1 // =0x1
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB0_4: // %b1
; CHECK-NEXT:    cmp w0, #1
; CHECK-NEXT:    b.ne .LBB0_1
; CHECK-NEXT:  // %bb.5: // %b3
; CHECK-NEXT:    cbz wzr, .LBB0_1
; CHECK-NEXT:    b .LBB0_3
entry:
  %v2 = icmp eq i32 0, 0
  br i1 %v2, label %b1, label %b4

b1:
  switch i32 %v4, label %b4 [
    i32 1, label %b3
    i32 0, label %b2
  ], !prof !0

b2:
  br label %b4

b3:
  %v3 = icmp eq i32 0, 0
  br i1 %v3, label %b4, label %b2

b4:
  %v16 = phi i8 [ 1, %b2 ], [ 0, %entry ], [ 0, %b3 ], [ 0, %b1 ]
  ret i8 %v16
}

define i8 @foo_optspeed(i32 %v4) {
; CHECK-LABEL: foo_optspeed:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cbz wzr, .LBB1_2
; CHECK-NEXT:  .LBB1_1:
; CHECK-NEXT:    mov w0, wzr
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB1_2: // %b1
; CHECK-NEXT:    cbnz w0, .LBB1_4
; CHECK-NEXT:  .LBB1_3: // %b2
; CHECK-NEXT:    mov w0, #1 // =0x1
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB1_4: // %b1
; CHECK-NEXT:    cmp w0, #1
; CHECK-NEXT:    b.ne .LBB1_1
; CHECK-NEXT:  // %bb.5: // %b3
; CHECK-NEXT:    cbnz wzr, .LBB1_3
; CHECK-NEXT:    b .LBB1_1
entry:
  %v2 = icmp eq i32 0, 0
  br i1 %v2, label %b1, label %b4

b1:
  switch i32 %v4, label %b4 [
    i32 1, label %b3
    i32 0, label %b2
  ], !prof !0

b2:
  br label %b4

b3:
  %v3 = icmp eq i32 0, 0
  br i1 %v3, label %b4, label %b2

b4:
  %v16 = phi i8 [ 1, %b2 ], [ 0, %entry ], [ 0, %b3 ], [ 0, %b1 ]
  ret i8 %v16
}

!0 = !{!"branch_weights", i32 5, i32 5, i32 100}
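
; Note: in the branch_weights metadata !0 for the switch in %b1, the first
; weight belongs to the default destination and the remaining weights follow
; the case list in order: default %b4 = 5, case 1 (%b3) = 5, case 0 (%b2) = 100.
; This marks %b2 as the hot successor, which is the branch the compiler may
; flip for speed in @foo_optspeed but leaves alone under optsize in
; @foo_optsize.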