; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD ; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI ; Check that constrained fp intrinsics are correctly lowered. ; CHECK-GI: warning: Instruction selection used fallback path for add_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sub_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for mul_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for div_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for frem_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fma_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_i32_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_i32_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_i64_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_i64_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_f32_i32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_f32_i32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_f32_i64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_f32_i64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_f32_i128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_f32_i128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqrt_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for powi_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sin_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for cos_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for tan_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for asin_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for acos_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for atan_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for atan2_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sinh_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for cosh_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for tanh_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pow_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log10_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log2_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for exp_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for exp2_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for rint_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for nearbyint_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lrint_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for llrint_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for maxnum_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for minnum_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for maximum_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for minimum_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ceil_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for floor_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lround_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for llround_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for round_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for roundeven_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for trunc_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_olt_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ole_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ogt_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_oge_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_oeq_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_one_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ult_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ule_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ugt_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_uge_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ueq_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_une_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_olt_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ole_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ogt_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_oge_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_oeq_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_one_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ult_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ule_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ugt_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_uge_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ueq_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_une_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sub_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for mul_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for div_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for frem_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fma_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_i32_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_i32_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_i64_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_i64_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_f64_i32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_f64_i32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_f64_i64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_f64_i64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_f64_i128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_f64_i128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqrt_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for powi_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sin_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for cos_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for tan_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for asin_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for acos_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for atan_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for atan2_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sinh_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for cosh_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for tanh_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pow_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log10_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log2_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for exp_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for exp2_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for rint_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for nearbyint_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lrint_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for llrint_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for maxnum_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for minnum_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for maximum_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for minimum_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ceil_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for floor_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lround_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for llround_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for round_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for roundeven_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for trunc_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_olt_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ole_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ogt_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_oge_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_oeq_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_one_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ult_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ule_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ugt_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_uge_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ueq_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_une_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_olt_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ole_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ogt_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_oge_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_oeq_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_one_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ult_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ule_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ugt_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_uge_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ueq_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_une_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sub_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for mul_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for div_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for frem_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fma_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_i32_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_i32_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_i64_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_i64_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_f128_i32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_f128_i32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_f128_i64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_f128_i64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_f128_i128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_f128_i128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqrt_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for powi_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sin_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for cos_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for tan_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for asin_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for acos_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for atan_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for atan2_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sinh_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for cosh_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for tanh_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pow_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log10_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log2_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for exp_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for exp2_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for rint_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for nearbyint_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lrint_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for llrint_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for maxnum_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for minnum_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ceil_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for floor_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lround_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for llround_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for round_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for trunc_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_olt_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ole_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ogt_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_oge_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_oeq_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_one_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ult_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ule_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ugt_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_uge_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_ueq_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_une_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_olt_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ole_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ogt_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_oge_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_oeq_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_one_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ult_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ule_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ugt_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_uge_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_ueq_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_une_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptrunc_f32_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptrunc_f32_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptrunc_f64_f128 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fpext_f64_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fpext_f128_f32 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fpext_f128_f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sin_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for cos_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for tan_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for asin_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for acos_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for atan_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for atan2_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sinh_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for cosh_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for tanh_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pow_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log2_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for log10_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for exp_v1f64 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for exp2_v1f64 ; Single-precision intrinsics define float @add_f32(float %x, float %y) #0 { ; CHECK-LABEL: add_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fadd s0, s0, s1 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @sub_f32(float %x, float %y) #0 { ; CHECK-LABEL: sub_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fsub s0, s0, s1 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.fsub.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @mul_f32(float %x, float %y) #0 { ; CHECK-LABEL: mul_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fmul s0, s0, s1 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.fmul.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @div_f32(float %x, float %y) #0 { ; CHECK-LABEL: div_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fdiv s0, s0, s1 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @frem_f32(float %x, float %y) #0 { ; CHECK-LABEL: frem_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl fmodf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.frem.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @fma_f32(float %x, float %y, float %z) #0 { ; CHECK-LABEL: fma_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fmadd s0, s0, s1, s2 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.fma.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define i32 @fptosi_i32_f32(float %x) #0 { ; CHECK-LABEL: fptosi_i32_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtzs w0, s0 ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %x, metadata !"fpexcept.strict") #0 ret i32 %val } define i32 @fptoui_i32_f32(float %x) #0 { ; CHECK-LABEL: fptoui_i32_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtzu w0, s0 ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") #0 ret i32 %val } define i64 @fptosi_i64_f32(float %x) #0 { ; CHECK-LABEL: fptosi_i64_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtzs x0, s0 ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %x, metadata !"fpexcept.strict") #0 ret i64 %val } define i64 @fptoui_i64_f32(float %x) #0 { ; CHECK-LABEL: fptoui_i64_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtzu x0, s0 ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %x, metadata !"fpexcept.strict") #0 ret i64 %val } define float @sitofp_f32_i32(i32 %x) #0 { ; CHECK-LABEL: sitofp_f32_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: scvtf s0, w0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @uitofp_f32_i32(i32 %x) #0 { ; CHECK-LABEL: uitofp_f32_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ucvtf s0, w0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @sitofp_f32_i64(i64 %x) #0 { ; CHECK-LABEL: sitofp_f32_i64: ; CHECK: // %bb.0: ; CHECK-NEXT: scvtf s0, x0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @uitofp_f32_i64(i64 %x) #0 { ; CHECK-LABEL: uitofp_f32_i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ucvtf s0, x0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @sitofp_f32_i128(i128 %x) #0 { ; CHECK-LABEL: sitofp_f32_i128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __floattisf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.sitofp.f32.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @uitofp_f32_i128(i128 %x) #0 { ; CHECK-LABEL: uitofp_f32_i128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __floatuntisf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.uitofp.f32.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @sqrt_f32(float %x) #0 { ; CHECK-LABEL: sqrt_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fsqrt s0, s0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.sqrt.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @powi_f32(float %x, i32 %y) #0 { ; CHECK-LABEL: powi_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __powisf2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.powi.f32(float %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @sin_f32(float %x) #0 { ; CHECK-LABEL: sin_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl sinf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.sin.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @cos_f32(float %x) #0 { ; CHECK-LABEL: cos_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl cosf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.cos.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @tan_f32(float %x) #0 { ; CHECK-LABEL: tan_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl tanf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.tan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @asin_f32(float %x) #0 { ; CHECK-LABEL: asin_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl asinf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.asin.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @acos_f32(float %x) #0 { ; CHECK-LABEL: acos_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl acosf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.acos.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @atan_f32(float %x) #0 { ; CHECK-LABEL: atan_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl atanf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.atan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @atan2_f32(float %x, float %y) #0 { ; CHECK-LABEL: atan2_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl atan2f ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.atan2.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @sinh_f32(float %x) #0 { ; CHECK-LABEL: sinh_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl sinhf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.sinh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @cosh_f32(float %x) #0 { ; CHECK-LABEL: cosh_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl coshf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.cosh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @tanh_f32(float %x) #0 { ; CHECK-LABEL: tanh_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl tanhf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.tanh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @pow_f32(float %x, float %y) #0 { ; CHECK-LABEL: pow_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl powf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.pow.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @log_f32(float %x) #0 { ; CHECK-LABEL: log_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl logf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.log.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @log10_f32(float %x) #0 { ; CHECK-LABEL: log10_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl log10f ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.log10.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @log2_f32(float %x) #0 { ; CHECK-LABEL: log2_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl log2f ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.log2.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @exp_f32(float %x) #0 { ; CHECK-LABEL: exp_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl expf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.exp.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @exp2_f32(float %x) #0 { ; CHECK-LABEL: exp2_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl exp2f ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.exp2.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @rint_f32(float %x) #0 { ; CHECK-LABEL: rint_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: frintx s0, s0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.rint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @nearbyint_f32(float %x) #0 { ; CHECK-LABEL: nearbyint_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: frinti s0, s0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.nearbyint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define i32 @lrint_f32(float %x) #0 { ; CHECK-LABEL: lrint_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: frintx s0, s0 ; CHECK-NEXT: fcvtzs w0, s0 ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret i32 %val } define i64 @llrint_f32(float %x) #0 { ; CHECK-LABEL: llrint_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: frintx s0, s0 ; CHECK-NEXT: fcvtzs x0, s0 ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret i64 %val } define float @maxnum_f32(float %x, float %y) #0 { ; CHECK-LABEL: maxnum_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fmaxnm s0, s0, s1 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.maxnum.f32(float %x, float %y, metadata !"fpexcept.strict") #0 ret float %val } define float @minnum_f32(float %x, float %y) #0 { ; CHECK-LABEL: minnum_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fminnm s0, s0, s1 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.minnum.f32(float %x, float %y, metadata !"fpexcept.strict") #0 ret float %val } define float @maximum_f32(float %x, float %y) #0 { ; CHECK-LABEL: maximum_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fmax s0, s0, s1 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.maximum.f32(float %x, float %y, metadata !"fpexcept.strict") #0 ret float %val } define float @minimum_f32(float %x, float %y) #0 { ; CHECK-LABEL: minimum_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fmin s0, s0, s1 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.minimum.f32(float %x, float %y, metadata !"fpexcept.strict") #0 ret float %val } define float @ceil_f32(float %x) #0 { ; CHECK-LABEL: ceil_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: frintp s0, s0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.ceil.f32(float %x, metadata !"fpexcept.strict") #0 ret float %val } define float @floor_f32(float %x) #0 { ; CHECK-LABEL: floor_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: frintm s0, s0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.floor.f32(float %x, metadata !"fpexcept.strict") #0 ret float %val } define i32 @lround_f32(float %x) #0 { ; CHECK-LABEL: lround_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtas w0, s0 ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x, metadata !"fpexcept.strict") #0 ret i32 %val } define i64 @llround_f32(float %x) #0 { ; CHECK-LABEL: llround_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtas x0, s0 ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x, metadata !"fpexcept.strict") #0 ret i64 %val } define float @round_f32(float %x) #0 { ; CHECK-LABEL: round_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: frinta s0, s0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.round.f32(float %x, metadata !"fpexcept.strict") #0 ret float %val } define float @roundeven_f32(float %x) #0 { ; CHECK-LABEL: roundeven_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: frintn s0, s0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.roundeven.f32(float %x, metadata !"fpexcept.strict") #0 ret float %val } define float @trunc_f32(float %x) #0 { ; CHECK-LABEL: trunc_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: frintz s0, s0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.trunc.f32(float %x, metadata !"fpexcept.strict") #0 ret float %val } define i32 @fcmp_olt_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_olt_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w0, mi ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ole_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_ole_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w0, ls ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ogt_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_ogt_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w0, gt ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_oge_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_oge_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w0, ge ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_oeq_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_oeq_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_one_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_one_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w8, mi ; CHECK-NEXT: csinc w0, w8, wzr, le ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ult_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_ult_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w0, lt ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ule_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_ule_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w0, le ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ugt_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_ugt_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w0, hi ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_uge_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_uge_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w0, pl ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ueq_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_ueq_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w8, eq ; CHECK-NEXT: csinc w0, w8, wzr, vc ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_une_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmp_une_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp s0, s1 ; CHECK-NEXT: cset w0, ne ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_olt_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_olt_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w0, mi ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ole_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_ole_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w0, ls ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ogt_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_ogt_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w0, gt ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_oge_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_oge_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w0, ge ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_oeq_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_oeq_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_one_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_one_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w8, mi ; CHECK-NEXT: csinc w0, w8, wzr, le ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ult_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_ult_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w0, lt ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ule_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_ule_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w0, le ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ugt_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_ugt_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w0, hi ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_uge_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_uge_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w0, pl ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ueq_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_ueq_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w8, eq ; CHECK-NEXT: csinc w0, w8, wzr, vc ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_une_f32(float %a, float %b) #0 { ; CHECK-LABEL: fcmps_une_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe s0, s1 ; CHECK-NEXT: cset w0, ne ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } ; Double-precision intrinsics define double @add_f64(double %x, double %y) #0 { ; CHECK-LABEL: add_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fadd d0, d0, d1 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @sub_f64(double %x, double %y) #0 { ; CHECK-LABEL: sub_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fsub d0, d0, d1 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @mul_f64(double %x, double %y) #0 { ; CHECK-LABEL: mul_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fmul d0, d0, d1 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.fmul.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @div_f64(double %x, double %y) #0 { ; CHECK-LABEL: div_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fdiv d0, d0, d1 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.fdiv.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @frem_f64(double %x, double %y) #0 { ; CHECK-LABEL: frem_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl fmod ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.frem.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @fma_f64(double %x, double %y, double %z) #0 { ; CHECK-LABEL: fma_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fmadd d0, d0, d1, d2 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.fma.f64(double %x, double %y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define i32 @fptosi_i32_f64(double %x) #0 { ; CHECK-LABEL: fptosi_i32_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtzs w0, d0 ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x, metadata !"fpexcept.strict") #0 ret i32 %val } define i32 @fptoui_i32_f64(double %x) #0 { ; CHECK-LABEL: fptoui_i32_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtzu w0, d0 ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict") #0 ret i32 %val } define i64 @fptosi_i64_f64(double %x) #0 { ; CHECK-LABEL: fptosi_i64_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtzs x0, d0 ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %x, metadata !"fpexcept.strict") #0 ret i64 %val } define i64 @fptoui_i64_f64(double %x) #0 { ; CHECK-LABEL: fptoui_i64_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtzu x0, d0 ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %x, metadata !"fpexcept.strict") #0 ret i64 %val } define double @sitofp_f64_i32(i32 %x) #0 { ; CHECK-LABEL: sitofp_f64_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: scvtf d0, w0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @uitofp_f64_i32(i32 %x) #0 { ; CHECK-LABEL: uitofp_f64_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ucvtf d0, w0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @sitofp_f64_i64(i64 %x) #0 { ; CHECK-LABEL: sitofp_f64_i64: ; CHECK: // %bb.0: ; CHECK-NEXT: scvtf d0, x0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @uitofp_f64_i64(i64 %x) #0 { ; CHECK-LABEL: uitofp_f64_i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ucvtf d0, x0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @sitofp_f64_i128(i128 %x) #0 { ; CHECK-LABEL: sitofp_f64_i128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __floattidf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.sitofp.f64.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @uitofp_f64_i128(i128 %x) #0 { ; CHECK-LABEL: uitofp_f64_i128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __floatuntidf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.uitofp.f64.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @sqrt_f64(double %x) #0 { ; CHECK-LABEL: sqrt_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fsqrt d0, d0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.sqrt.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @powi_f64(double %x, i32 %y) #0 { ; CHECK-LABEL: powi_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __powidf2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.powi.f64(double %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @sin_f64(double %x) #0 { ; CHECK-LABEL: sin_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl sin ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.sin.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @cos_f64(double %x) #0 { ; CHECK-LABEL: cos_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl cos ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.cos.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @tan_f64(double %x) #0 { ; CHECK-LABEL: tan_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl tan ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.tan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @asin_f64(double %x) #0 { ; CHECK-LABEL: asin_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl asin ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.asin.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @acos_f64(double %x) #0 { ; CHECK-LABEL: acos_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl acos ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.acos.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @atan_f64(double %x) #0 { ; CHECK-LABEL: atan_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl atan ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.atan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @atan2_f64(double %x, double %y) #0 { ; CHECK-LABEL: atan2_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl atan2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.atan2.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @sinh_f64(double %x) #0 { ; CHECK-LABEL: sinh_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl sinh ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.sinh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @cosh_f64(double %x) #0 { ; CHECK-LABEL: cosh_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl cosh ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.cosh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @tanh_f64(double %x) #0 { ; CHECK-LABEL: tanh_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl tanh ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.tanh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @pow_f64(double %x, double %y) #0 { ; CHECK-LABEL: pow_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl pow ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.pow.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @log_f64(double %x) #0 { ; CHECK-LABEL: log_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl log ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.log.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @log10_f64(double %x) #0 { ; CHECK-LABEL: log10_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl log10 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.log10.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @log2_f64(double %x) #0 { ; CHECK-LABEL: log2_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl log2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.log2.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @exp_f64(double %x) #0 { ; CHECK-LABEL: exp_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl exp ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.exp.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @exp2_f64(double %x) #0 { ; CHECK-LABEL: exp2_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl exp2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.exp2.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @rint_f64(double %x) #0 { ; CHECK-LABEL: rint_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: frintx d0, d0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.rint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @nearbyint_f64(double %x) #0 { ; CHECK-LABEL: nearbyint_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: frinti d0, d0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.nearbyint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define i32 @lrint_f64(double %x) #0 { ; CHECK-LABEL: lrint_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: frintx d0, d0 ; CHECK-NEXT: fcvtzs w0, d0 ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret i32 %val } define i64 @llrint_f64(double %x) #0 { ; CHECK-LABEL: llrint_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: frintx d0, d0 ; CHECK-NEXT: fcvtzs x0, d0 ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret i64 %val } define double @maxnum_f64(double %x, double %y) #0 { ; CHECK-LABEL: maxnum_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fmaxnm d0, d0, d1 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.maxnum.f64(double %x, double %y, metadata !"fpexcept.strict") #0 ret double %val } define double @minnum_f64(double %x, double %y) #0 { ; CHECK-LABEL: minnum_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fminnm d0, d0, d1 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.minnum.f64(double %x, double %y, metadata !"fpexcept.strict") #0 ret double %val } define double @maximum_f64(double %x, double %y) #0 { ; CHECK-LABEL: maximum_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fmax d0, d0, d1 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.maximum.f64(double %x, double %y, metadata !"fpexcept.strict") #0 ret double %val } define double @minimum_f64(double %x, double %y) #0 { ; CHECK-LABEL: minimum_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fmin d0, d0, d1 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.minimum.f64(double %x, double %y, metadata !"fpexcept.strict") #0 ret double %val } define double @ceil_f64(double %x) #0 { ; CHECK-LABEL: ceil_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: frintp d0, d0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.ceil.f64(double %x, metadata !"fpexcept.strict") #0 ret double %val } define double @floor_f64(double %x) #0 { ; CHECK-LABEL: floor_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: frintm d0, d0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.floor.f64(double %x, metadata !"fpexcept.strict") #0 ret double %val } define i32 @lround_f64(double %x) #0 { ; CHECK-LABEL: lround_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtas w0, d0 ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x, metadata !"fpexcept.strict") #0 ret i32 %val } define i64 @llround_f64(double %x) #0 { ; CHECK-LABEL: llround_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvtas x0, d0 ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x, metadata !"fpexcept.strict") #0 ret i64 %val } define double @round_f64(double %x) #0 { ; CHECK-LABEL: round_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: frinta d0, d0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.round.f64(double %x, metadata !"fpexcept.strict") #0 ret double %val } define double @roundeven_f64(double %x) #0 { ; CHECK-LABEL: roundeven_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: frintn d0, d0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.roundeven.f64(double %x, metadata !"fpexcept.strict") #0 ret double %val } define double @trunc_f64(double %x) #0 { ; CHECK-LABEL: trunc_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: frintz d0, d0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict") #0 ret double %val } define i32 @fcmp_olt_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_olt_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w0, mi ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ole_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_ole_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w0, ls ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ogt_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_ogt_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w0, gt ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_oge_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_oge_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w0, ge ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_oeq_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_oeq_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_one_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_one_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w8, mi ; CHECK-NEXT: csinc w0, w8, wzr, le ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ult_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_ult_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w0, lt ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ule_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_ule_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w0, le ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ugt_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_ugt_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w0, hi ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_uge_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_uge_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w0, pl ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ueq_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_ueq_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w8, eq ; CHECK-NEXT: csinc w0, w8, wzr, vc ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_une_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmp_une_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: cset w0, ne ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_olt_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_olt_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w0, mi ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ole_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_ole_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w0, ls ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ogt_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_ogt_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w0, gt ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_oge_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_oge_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w0, ge ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_oeq_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_oeq_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_one_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_one_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w8, mi ; CHECK-NEXT: csinc w0, w8, wzr, le ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ult_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_ult_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w0, lt ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ule_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_ule_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w0, le ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ugt_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_ugt_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w0, hi ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_uge_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_uge_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w0, pl ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ueq_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_ueq_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w8, eq ; CHECK-NEXT: csinc w0, w8, wzr, vc ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_une_f64(double %a, double %b) #0 { ; CHECK-LABEL: fcmps_une_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcmpe d0, d1 ; CHECK-NEXT: cset w0, ne ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } ; Long-double-precision intrinsics define fp128 @add_f128(fp128 %x, fp128 %y) #0 { ; CHECK-LABEL: add_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __addtf3 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.fadd.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @sub_f128(fp128 %x, fp128 %y) #0 { ; CHECK-LABEL: sub_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __subtf3 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.fsub.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @mul_f128(fp128 %x, fp128 %y) #0 { ; CHECK-LABEL: mul_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __multf3 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.fmul.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @div_f128(fp128 %x, fp128 %y) #0 { ; CHECK-LABEL: div_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __divtf3 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.fdiv.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @frem_f128(fp128 %x, fp128 %y) #0 { ; CHECK-LABEL: frem_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl fmodl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.frem.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @fma_f128(fp128 %x, fp128 %y, fp128 %z) #0 { ; CHECK-LABEL: fma_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl fmal ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.fma.f128(fp128 %x, fp128 %y, fp128 %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define i32 @fptosi_i32_f128(fp128 %x) #0 { ; CHECK-LABEL: fptosi_i32_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __fixtfsi ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0 ret i32 %val } define i32 @fptoui_i32_f128(fp128 %x) #0 { ; CHECK-LABEL: fptoui_i32_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __fixunstfsi ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0 ret i32 %val } define i64 @fptosi_i64_f128(fp128 %x) #0 { ; CHECK-LABEL: fptosi_i64_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __fixtfdi ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0 ret i64 %val } define i64 @fptoui_i64_f128(fp128 %x) #0 { ; CHECK-LABEL: fptoui_i64_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __fixunstfdi ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0 ret i64 %val } define fp128 @sitofp_f128_i32(i32 %x) #0 { ; CHECK-LABEL: sitofp_f128_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __floatsitf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @uitofp_f128_i32(i32 %x) #0 { ; CHECK-LABEL: uitofp_f128_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __floatunsitf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @sitofp_f128_i64(i64 %x) #0 { ; CHECK-LABEL: sitofp_f128_i64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __floatditf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @uitofp_f128_i64(i64 %x) #0 { ; CHECK-LABEL: uitofp_f128_i64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __floatunditf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @sitofp_f128_i128(i128 %x) #0 { ; CHECK-LABEL: sitofp_f128_i128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __floattitf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.sitofp.f128.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @uitofp_f128_i128(i128 %x) #0 { ; CHECK-LABEL: uitofp_f128_i128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __floatuntitf ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.uitofp.f128.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @sqrt_f128(fp128 %x) #0 { ; CHECK-LABEL: sqrt_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl sqrtl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.sqrt.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @powi_f128(fp128 %x, i32 %y) #0 { ; CHECK-LABEL: powi_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __powitf2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.powi.f128(fp128 %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @sin_f128(fp128 %x) #0 { ; CHECK-LABEL: sin_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl sinl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.sin.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @cos_f128(fp128 %x) #0 { ; CHECK-LABEL: cos_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl cosl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.cos.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @tan_f128(fp128 %x) #0 { ; CHECK-LABEL: tan_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl tanl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.tan.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @asin_f128(fp128 %x) #0 { ; CHECK-LABEL: asin_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl asinl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.asin.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @acos_f128(fp128 %x) #0 { ; CHECK-LABEL: acos_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl acosl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.acos.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @atan_f128(fp128 %x) #0 { ; CHECK-LABEL: atan_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl atanl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.atan.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @atan2_f128(fp128 %x, fp128 %y) #0 { ; CHECK-LABEL: atan2_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl atan2l ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.atan2.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @sinh_f128(fp128 %x) #0 { ; CHECK-LABEL: sinh_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl sinhl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.sinh.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @cosh_f128(fp128 %x) #0 { ; CHECK-LABEL: cosh_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl coshl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.cosh.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @tanh_f128(fp128 %x) #0 { ; CHECK-LABEL: tanh_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl tanhl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.tanh.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @pow_f128(fp128 %x, fp128 %y) #0 { ; CHECK-LABEL: pow_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl powl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.pow.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @log_f128(fp128 %x) #0 { ; CHECK-LABEL: log_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl logl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.log.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @log10_f128(fp128 %x) #0 { ; CHECK-LABEL: log10_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl log10l ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.log10.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @log2_f128(fp128 %x) #0 { ; CHECK-LABEL: log2_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl log2l ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.log2.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @exp_f128(fp128 %x) #0 { ; CHECK-LABEL: exp_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl expl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.exp.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @exp2_f128(fp128 %x) #0 { ; CHECK-LABEL: exp2_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl exp2l ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.exp2.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @rint_f128(fp128 %x) #0 { ; CHECK-LABEL: rint_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl rintl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.rint.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @nearbyint_f128(fp128 %x) #0 { ; CHECK-LABEL: nearbyint_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl nearbyintl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.nearbyint.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret fp128 %val } define i32 @lrint_f128(fp128 %x) #0 { ; CHECK-LABEL: lrint_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl lrintl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.lrint.i32.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret i32 %val } define i64 @llrint_f128(fp128 %x) #0 { ; CHECK-LABEL: llrint_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl llrintl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret i64 %val } define fp128 @maxnum_f128(fp128 %x, fp128 %y) #0 { ; CHECK-LABEL: maxnum_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl fmaxl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %x, fp128 %y, metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @minnum_f128(fp128 %x, fp128 %y) #0 { ; CHECK-LABEL: minnum_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl fminl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %x, fp128 %y, metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @ceil_f128(fp128 %x) #0 { ; CHECK-LABEL: ceil_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl ceill ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.ceil.f128(fp128 %x, metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @floor_f128(fp128 %x) #0 { ; CHECK-LABEL: floor_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl floorl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.floor.f128(fp128 %x, metadata !"fpexcept.strict") #0 ret fp128 %val } define i32 @lround_f128(fp128 %x) #0 { ; CHECK-LABEL: lround_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl lroundl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call i32 @llvm.experimental.constrained.lround.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0 ret i32 %val } define i64 @llround_f128(fp128 %x) #0 { ; CHECK-LABEL: llround_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl llroundl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0 ret i64 %val } define fp128 @round_f128(fp128 %x) #0 { ; CHECK-LABEL: round_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl roundl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.round.f128(fp128 %x, metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @trunc_f128(fp128 %x) #0 { ; CHECK-LABEL: trunc_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl truncl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %x, metadata !"fpexcept.strict") #0 ret fp128 %val } define i32 @fcmp_olt_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_olt_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __lttf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, lt ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ole_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_ole_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __letf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, le ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ole", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ogt_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_ogt_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __gttf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, gt ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ogt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_oge_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_oge_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __getf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, ge ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_oeq_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_oeq_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __eqtf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"oeq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_one_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_one_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: stp x30, x19, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill ; CHECK-NEXT: bl __eqtf2 ; CHECK-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload ; CHECK-NEXT: mov w19, w0 ; CHECK-NEXT: bl __unordtf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: ccmp w19, #0, #4, eq ; CHECK-NEXT: ldp x30, x19, [sp, #32] // 16-byte Folded Reload ; CHECK-NEXT: cset w0, ne ; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"one", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ult_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_ult_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __getf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, lt ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ule_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_ule_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __gttf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, le ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ule", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ugt_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_ugt_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __letf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, gt ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ugt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_uge_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_uge_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __lttf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, ge ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_ueq_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_ueq_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: stp x30, x19, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill ; CHECK-NEXT: bl __eqtf2 ; CHECK-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload ; CHECK-NEXT: mov w19, w0 ; CHECK-NEXT: bl __unordtf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: ccmp w19, #0, #4, eq ; CHECK-NEXT: ldp x30, x19, [sp, #32] // 16-byte Folded Reload ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ueq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmp_une_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmp_une_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __netf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, ne ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"une", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_olt_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_olt_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __lttf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, lt ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ole_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_ole_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __letf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, le ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ole", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ogt_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_ogt_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __gttf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, gt ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ogt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_oge_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_oge_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __getf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, ge ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_oeq_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_oeq_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __eqtf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"oeq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_one_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_one_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: stp x30, x19, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill ; CHECK-NEXT: bl __eqtf2 ; CHECK-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload ; CHECK-NEXT: mov w19, w0 ; CHECK-NEXT: bl __unordtf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: ccmp w19, #0, #4, eq ; CHECK-NEXT: ldp x30, x19, [sp, #32] // 16-byte Folded Reload ; CHECK-NEXT: cset w0, ne ; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"one", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ult_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_ult_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __getf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, lt ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ule_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_ule_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __gttf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, le ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ule", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ugt_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_ugt_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __letf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, gt ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ugt", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_uge_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_uge_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __lttf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, ge ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_ueq_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_ueq_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: stp x30, x19, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill ; CHECK-NEXT: bl __eqtf2 ; CHECK-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload ; CHECK-NEXT: mov w19, w0 ; CHECK-NEXT: bl __unordtf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: ccmp w19, #0, #4, eq ; CHECK-NEXT: ldp x30, x19, [sp, #32] // 16-byte Folded Reload ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ueq", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } define i32 @fcmps_une_f128(fp128 %a, fp128 %b) #0 { ; CHECK-LABEL: fcmps_une_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __netf2 ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cset w0, ne ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"une", metadata !"fpexcept.strict") #0 %conv = zext i1 %cmp to i32 ret i32 %conv } ; Intrinsics to convert between floating-point types define float @fptrunc_f32_f64(double %x) #0 { ; CHECK-LABEL: fptrunc_f32_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvt s0, d0 ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define float @fptrunc_f32_f128(fp128 %x) #0 { ; CHECK-LABEL: fptrunc_f32_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __trunctfsf2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call float @llvm.experimental.constrained.fptrunc.f32.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret float %val } define double @fptrunc_f64_f128(fp128 %x) #0 { ; CHECK-LABEL: fptrunc_f64_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __trunctfdf2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.fptrunc.f64.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret double %val } define double @fpext_f64_f32(float %x) #0 { ; CHECK-LABEL: fpext_f64_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fcvt d0, s0 ; CHECK-NEXT: ret %val = call double @llvm.experimental.constrained.fpext.f64.f32(float %x, metadata !"fpexcept.strict") #0 ret double %val } define fp128 @fpext_f128_f32(float %x) #0 { ; CHECK-LABEL: fpext_f128_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __extendsftf2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %x, metadata !"fpexcept.strict") #0 ret fp128 %val } define fp128 @fpext_f128_f64(double %x) #0 { ; CHECK-LABEL: fpext_f128_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl __extenddftf2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %x, metadata !"fpexcept.strict") #0 ret fp128 %val } define <1 x double> @sin_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: sin_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl sin ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.sin.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @cos_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: cos_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl cos ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.cos.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @tan_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: tan_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl tan ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.tan.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @asin_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: asin_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl asin ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.asin.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @acos_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: acos_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl acos ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.acos.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @atan_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: atan_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl atan ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.atan.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @atan2_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: atan2_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl atan2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.atan2.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @sinh_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: sinh_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl sinh ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.sinh.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @cosh_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: cosh_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl cosh ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.cosh.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @tanh_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: tanh_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl tanh ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.tanh.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @pow_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: pow_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl pow ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.pow.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @log_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: log_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl log ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.log.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @log2_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: log2_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl log2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.log2.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @log10_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: log10_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl log10 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.log10.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @exp_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: exp_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl exp ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.exp.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } define <1 x double> @exp2_v1f64(<1 x double> %x, <1 x double> %y) #0 { ; CHECK-LABEL: exp2_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl exp2 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %val = call <1 x double> @llvm.experimental.constrained.exp2.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret <1 x double> %val } attributes #0 = { strictfp } declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata) declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata) declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata) declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata) declare float @llvm.experimental.constrained.frem.f32(float, float, metadata, metadata) declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata) declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata) declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata) declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata) declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata) declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata) declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata) declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata) declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata) declare float @llvm.experimental.constrained.sitofp.f32.i128(i128, metadata, metadata) declare float @llvm.experimental.constrained.uitofp.f32.i128(i128, metadata, metadata) declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata) declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.asin.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.acos.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.atan.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.atan2.f32(float, float, metadata, metadata) declare float @llvm.experimental.constrained.sinh.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.cosh.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.tanh.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata) declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.log2.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata) declare i32 @llvm.experimental.constrained.lrint.i32.f32(float, metadata, metadata) declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata) declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata) declare float @llvm.experimental.constrained.maximum.f32(float, float, metadata) declare float @llvm.experimental.constrained.minimum.f32(float, float, metadata) declare float @llvm.experimental.constrained.ceil.f32(float, metadata) declare float @llvm.experimental.constrained.floor.f32(float, metadata) declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata) declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata) declare float @llvm.experimental.constrained.round.f32(float, metadata) declare float @llvm.experimental.constrained.roundeven.f32(float, metadata) declare float @llvm.experimental.constrained.trunc.f32(float, metadata) declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata) declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata) declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata) declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata) declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata) declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata) declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata) declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata) declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata) declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata) declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata) declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata) declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata) declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata) declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata) declare double @llvm.experimental.constrained.sitofp.f64.i128(i128, metadata, metadata) declare double @llvm.experimental.constrained.uitofp.f64.i128(i128, metadata, metadata) declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata) declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.asin.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.acos.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.atan.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.atan2.f64(double, double, metadata, metadata) declare double @llvm.experimental.constrained.sinh.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.cosh.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.tanh.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata) declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata) declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata) declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata) declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata) declare double @llvm.experimental.constrained.maximum.f64(double, double, metadata) declare double @llvm.experimental.constrained.minimum.f64(double, double, metadata) declare double @llvm.experimental.constrained.ceil.f64(double, metadata) declare double @llvm.experimental.constrained.floor.f64(double, metadata) declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata) declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata) declare double @llvm.experimental.constrained.round.f64(double, metadata) declare double @llvm.experimental.constrained.roundeven.f64(double, metadata) declare double @llvm.experimental.constrained.trunc.f64(double, metadata) declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata) declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata) declare fp128 @llvm.experimental.constrained.fadd.f128(fp128, fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.fsub.f128(fp128, fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.fdiv.f128(fp128, fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.frem.f128(fp128, fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.fma.f128(fp128, fp128, fp128, metadata, metadata) declare i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128, metadata) declare i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128, metadata) declare i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128, metadata) declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata) declare fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32, metadata, metadata) declare fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32, metadata, metadata) declare fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64, metadata, metadata) declare fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64, metadata, metadata) declare fp128 @llvm.experimental.constrained.sitofp.f128.i128(i128, metadata, metadata) declare fp128 @llvm.experimental.constrained.uitofp.f128.i128(i128, metadata, metadata) declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.powi.f128(fp128, i32, metadata, metadata) declare fp128 @llvm.experimental.constrained.sin.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.cos.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.asin.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.acos.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.atan.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.atan2.f128(fp128, fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.sinh.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.cosh.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.tanh.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.pow.f128(fp128, fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.log.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.log10.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.log2.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.exp.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.exp2.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata) declare i32 @llvm.experimental.constrained.lrint.i32.f128(fp128, metadata, metadata) declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.maxnum.f128(fp128, fp128, metadata) declare fp128 @llvm.experimental.constrained.minnum.f128(fp128, fp128, metadata) declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata) declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata) declare i32 @llvm.experimental.constrained.lround.i32.f128(fp128, metadata) declare i64 @llvm.experimental.constrained.llround.i64.f128(fp128, metadata) declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata) declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata) declare i1 @llvm.experimental.constrained.fcmps.f128(fp128, fp128, metadata, metadata) declare i1 @llvm.experimental.constrained.fcmp.f128(fp128, fp128, metadata, metadata) declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata) declare float @llvm.experimental.constrained.fptrunc.f32.f128(fp128, metadata, metadata) declare double @llvm.experimental.constrained.fptrunc.f64.f128(fp128, metadata, metadata) declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata) declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata) declare fp128 @llvm.experimental.constrained.fpext.f128.f64(double, metadata) ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; CHECK-GI: {{.*}} ; CHECK-SD: {{.*}}