about · summary · refs · log · tree · commitdiff
path: root/llvm/test/CodeGen/ARM/strict-fp-ops.ll
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test/CodeGen/ARM/strict-fp-ops.ll')
-rw-r--r-- llvm/test/CodeGen/ARM/strict-fp-ops.ll | 202
1 file changed, 202 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/strict-fp-ops.ll b/llvm/test/CodeGen/ARM/strict-fp-ops.ll
new file mode 100644
index 0000000..608ab07
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/strict-fp-ops.ll
@@ -0,0 +1,202 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple armv7-- -mattr=+vfp4 %s -o - | FileCheck %s
+
+
+; Div whose result is unused should be removed unless we have strict exceptions
+
+define void @unused_div(float %x, float %y) {
+; CHECK-LABEL: unused_div:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: bx lr
+entry:
+ %div = fdiv float %x, %y ; result unused; default fp env lets DCE delete it (CHECK: bare bx lr)
+ ret void
+}
+
+define void @unused_div_fpexcept_strict(float %x, float %y) #0 {
+; CHECK-LABEL: unused_div_fpexcept_strict:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov s0, r1
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vdiv.f32 s0, s2, s0
+; CHECK-NEXT: bx lr
+entry:
+ %div = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ; fpexcept.strict: the div may trap, so it must be kept (CHECK: vdiv.f32)
+ ret void
+}
+
+define void @unused_div_round_dynamic(float %x, float %y) #0 {
+; CHECK-LABEL: unused_div_round_dynamic:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: bx lr
+entry:
+ %div = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 ; fpexcept.ignore: still dead despite dynamic rounding (CHECK: bare bx lr)
+ ret void
+}
+
+
+; Machine CSE should eliminate the second add unless we have strict exceptions
+
+define float @add_twice(float %x, float %y, i32 %n) {
+; CHECK-LABEL: add_twice:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov s0, r1
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vadd.f32 s0, s2, s0
+; CHECK-NEXT: vmul.f32 s2, s0, s0
+; CHECK-NEXT: vmoveq.f32 s2, s0
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: bx lr
+entry:
+ %add = fadd float %x, %y ; plain fadd: no exception/rounding constraints
+ %tobool.not = icmp eq i32 %n, 0
+ br i1 %tobool.not, label %if.end, label %if.then
+
+if.then: ; n != 0
+ %add1 = fadd float %x, %y ; identical to %add; Machine CSE folds it (CHECK: single vadd.f32, vmul of s0 with itself)
+ %mul = fmul float %add, %add1
+ br label %if.end
+
+if.end:
+ %a.0 = phi float [ %mul, %if.then ], [ %add, %entry ]
+ ret float %a.0
+}
+
+define float @add_twice_fpexcept_strict(float %x, float %y, i32 %n) #0 {
+; CHECK-LABEL: add_twice_fpexcept_strict:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov s2, r1
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: vmov s4, r0
+; CHECK-NEXT: vadd.f32 s0, s4, s2
+; CHECK-NEXT: vaddne.f32 s2, s4, s2
+; CHECK-NEXT: vmulne.f32 s0, s0, s2
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+entry:
+ %add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %tobool.not = icmp eq i32 %n, 0
+ br i1 %tobool.not, label %if.end, label %if.then
+
+if.then: ; n != 0
+ %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ; fpexcept.strict: each fadd may trap, so CSE must keep both (CHECK: second, predicated vaddne.f32)
+ %mul = call float @llvm.experimental.constrained.fmul.f32(float %add, float %add1, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ br label %if.end
+
+if.end:
+ %a.0 = phi float [ %mul, %if.then ], [ %add, %entry ]
+ ret float %a.0
+}
+
+define float @add_twice_round_dynamic(float %x, float %y, i32 %n) #0 {
+; CHECK-LABEL: add_twice_round_dynamic:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov s0, r1
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vadd.f32 s0, s2, s0
+; CHECK-NEXT: vmulne.f32 s0, s0, s0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+entry:
+ %add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+ %tobool.not = icmp eq i32 %n, 0
+ br i1 %tobool.not, label %if.end, label %if.then
+
+if.then: ; n != 0
+ %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 ; no rounding-mode change between the adds, so CSE may fold them (CHECK: single vadd.f32)
+ %mul = call float @llvm.experimental.constrained.fmul.f32(float %add, float %add1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+ br label %if.end
+
+if.end:
+ %a.0 = phi float [ %mul, %if.then ], [ %add, %entry ]
+ ret float %a.0
+}
+
+; Two adds separated by llvm.set.rounding should be preserved when rounding is
+; dynamic (as they may give different results) or when we have strict exceptions
+; (the llvm.set.rounding is irrelevant, but both could trap).
+
+define float @set_rounding(float %x, float %y) {
+; CHECK-LABEL: set_rounding:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmrs r2, fpscr
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vmov s0, r1
+; CHECK-NEXT: vadd.f32 s0, s2, s0
+; CHECK-NEXT: vsub.f32 s0, s0, s0
+; CHECK-NEXT: orr r0, r2, #12582912
+; CHECK-NEXT: vmsr fpscr, r0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: vmrs r1, fpscr
+; CHECK-NEXT: bic r1, r1, #12582912
+; CHECK-NEXT: vmsr fpscr, r1
+; CHECK-NEXT: bx lr
+entry:
+ %add1 = fadd float %x, %y
+ call void @llvm.set.rounding(i32 0) ; set FPSCR rounding mode (orr #12582912 sets RMode bits 23:22)
+ %add2 = fadd float %x, %y ; default fp env: CSE'd with %add1 despite the mode change, so %sub folds to zero (CHECK: vsub.f32 s0, s0, s0)
+ call void @llvm.set.rounding(i32 1) ; restore rounding mode (bic clears RMode bits)
+ %sub = fsub float %add1, %add2
+ ret float %sub
+}
+
+define float @set_rounding_fpexcept_strict(float %x, float %y) #0 {
+; CHECK-LABEL: set_rounding_fpexcept_strict:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov s0, r1
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vadd.f32 s4, s2, s0
+; CHECK-NEXT: vmrs r0, fpscr
+; CHECK-NEXT: orr r0, r0, #12582912
+; CHECK-NEXT: vmsr fpscr, r0
+; CHECK-NEXT: vadd.f32 s0, s2, s0
+; CHECK-NEXT: vmrs r0, fpscr
+; CHECK-NEXT: bic r0, r0, #12582912
+; CHECK-NEXT: vmsr fpscr, r0
+; CHECK-NEXT: vsub.f32 s0, s4, s0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+entry:
+ %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ call void @llvm.set.rounding(i32 0) #0 ; FPSCR RMode updated via vmrs/orr/vmsr
+ %add2 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ; fpexcept.strict: both adds may trap, so a second vadd is kept after the FPSCR write
+ call void @llvm.set.rounding(i32 1) #0 ; restore FPSCR RMode (bic)
+ %sub = call float @llvm.experimental.constrained.fsub.f32(float %add1, float %add2, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %sub
+}
+
+define float @set_rounding_round_dynamic(float %x, float %y) #0 {
+; CHECK-LABEL: set_rounding_round_dynamic:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vmrs r0, fpscr
+; CHECK-NEXT: vmov s0, r1
+; CHECK-NEXT: vadd.f32 s4, s2, s0
+; CHECK-NEXT: orr r0, r0, #12582912
+; CHECK-NEXT: vmsr fpscr, r0
+; CHECK-NEXT: vmrs r0, fpscr
+; CHECK-NEXT: vadd.f32 s0, s2, s0
+; CHECK-NEXT: bic r0, r0, #12582912
+; CHECK-NEXT: vmsr fpscr, r0
+; CHECK-NEXT: vsub.f32 s0, s4, s0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+entry:
+ %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+ call void @llvm.set.rounding(i32 0) #0 ; change FPSCR rounding mode between the two adds
+ %add2 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 ; round.dynamic: may see the new mode, so it cannot be merged with %add1 (CHECK: two vadd.f32)
+ call void @llvm.set.rounding(i32 1) #0 ; restore rounding mode
+ %sub = call float @llvm.experimental.constrained.fsub.f32(float %add1, float %add2, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+ ret float %sub
+}
+
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
+declare i32 @llvm.get.rounding()
+declare void @llvm.set.rounding(i32)
+
+attributes #0 = { strictfp }