// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -flax-vector-conversions=none -emit-llvm -o - %s | FileCheck %s
// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_neon.h>

// vrndq_f64 rounds toward zero; the IR lowering asserted below is @llvm.trunc.
// CHECK-LABEL: define <2 x double> @rnd5(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[__P0_ADDR_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[__RET_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[REF_TMP_I:%.*]] = alloca <16 x i8>, align 16
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: store <2 x double> [[A]], ptr [[A_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16
// CHECK-NEXT: store <2 x double> [[TMP0]], ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[VRNDZ_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VRNDZ1_I:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[VRNDZ_I]])
// CHECK-NEXT: store <2 x double> [[VRNDZ1_I]], ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: store <2 x double> [[TMP2]], ptr [[__RET_I]], align 16
// CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr [[__RET_I]], align 16
// CHECK-NEXT: ret <2 x double> [[TMP3]]
//
float64x2_t rnd5(float64x2_t a) { return vrndq_f64(a); }

// vrndnq_f64 rounds to nearest with ties to even; the asserted lowering is @llvm.roundeven.
// CHECK-LABEL: define <2 x double> @rnd9(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[__P0_ADDR_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[__RET_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[REF_TMP_I:%.*]] = alloca <16 x i8>, align 16
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: store <2 x double> [[A]], ptr [[A_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16
// CHECK-NEXT: store <2 x double> [[TMP0]], ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[VRNDN_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VRNDN1_I:%.*]] = call <2 x double> @llvm.roundeven.v2f64(<2 x double> [[VRNDN_I]])
// CHECK-NEXT: store <2 x double> [[VRNDN1_I]], ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: store <2 x double> [[TMP2]], ptr [[__RET_I]], align 16
// CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr [[__RET_I]], align 16
// CHECK-NEXT: ret <2 x double> [[TMP3]]
//
float64x2_t rnd9(float64x2_t a) { return vrndnq_f64(a); }

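// vrndmq_f64 rounds toward minus infinity; the asserted lowering is @llvm.floor.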
// CHECK-LABEL: define <2 x double> @rnd13(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[__P0_ADDR_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[__RET_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[REF_TMP_I:%.*]] = alloca <16 x i8>, align 16
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: store <2 x double> [[A]], ptr [[A_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16
// CHECK-NEXT: store <2 x double> [[TMP0]], ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[VRNDM_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VRNDM1_I:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[VRNDM_I]])
// CHECK-NEXT: store <2 x double> [[VRNDM1_I]], ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: store <2 x double> [[TMP2]], ptr [[__RET_I]], align 16
// CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr [[__RET_I]], align 16
// CHECK-NEXT: ret <2 x double> [[TMP3]]
//
float64x2_t rnd13(float64x2_t a) { return vrndmq_f64(a); }

// vrndpq_f64 rounds toward plus infinity; the asserted lowering is @llvm.ceil.
// CHECK-LABEL: define <2 x double> @rnd18(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[__P0_ADDR_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[__RET_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[REF_TMP_I:%.*]] = alloca <16 x i8>, align 16
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: store <2 x double> [[A]], ptr [[A_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16
// CHECK-NEXT: store <2 x double> [[TMP0]], ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[VRNDP_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VRNDP1_I:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[VRNDP_I]])
// CHECK-NEXT: store <2 x double> [[VRNDP1_I]], ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: store <2 x double> [[TMP2]], ptr [[__RET_I]], align 16
// CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr [[__RET_I]], align 16
// CHECK-NEXT: ret <2 x double> [[TMP3]]
//
float64x2_t rnd18(float64x2_t a) { return vrndpq_f64(a); }

// vrndaq_f64 rounds to nearest with ties away from zero; the asserted lowering is @llvm.round.
// CHECK-LABEL: define <2 x double> @rnd22(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[__P0_ADDR_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[__RET_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[REF_TMP_I:%.*]] = alloca <16 x i8>, align 16
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: store <2 x double> [[A]], ptr [[A_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16
// CHECK-NEXT: store <2 x double> [[TMP0]], ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[VRNDA_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VRNDA1_I:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> [[VRNDA_I]])
// CHECK-NEXT: store <2 x double> [[VRNDA1_I]], ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: store <2 x double> [[TMP2]], ptr [[__RET_I]], align 16
// CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr [[__RET_I]], align 16
// CHECK-NEXT: ret <2 x double> [[TMP3]]
//
float64x2_t rnd22(float64x2_t a) { return vrndaq_f64(a); }

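// vrndxq_f64 rounds using the current FPCR rounding mode and may raise the
// Inexact exception; the asserted lowering is @llvm.rint.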
// CHECK-LABEL: define <2 x double> @rnd25(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[__P0_ADDR_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[__RET_I:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: [[REF_TMP_I:%.*]] = alloca <16 x i8>, align 16
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca <2 x double>, align 16
// CHECK-NEXT: store <2 x double> [[A]], ptr [[A_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16
// CHECK-NEXT: store <2 x double> [[TMP0]], ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[__P0_ADDR_I]], align 16
// CHECK-NEXT: [[VRNDX_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VRNDX1_I:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[VRNDX_I]])
// CHECK-NEXT: store <2 x double> [[VRNDX1_I]], ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[REF_TMP_I]], align 16
// CHECK-NEXT: store <2 x double> [[TMP2]], ptr [[__RET_I]], align 16
// CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr [[__RET_I]], align 16
// CHECK-NEXT: ret <2 x double> [[TMP3]]
//
float64x2_t rnd25(float64x2_t a) { return vrndxq_f64(a); }