// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 \
// RUN:   -triple aarch64 -target-feature +neon -target-feature +bf16 \
// RUN:   -disable-O0-optnone -emit-llvm -o - %s \
// RUN:   | opt -S -passes=mem2reg,sroa \
// RUN:   | FileCheck --check-prefixes=CHECK,CHECK-A64 %s
// RUN: %clang_cc1 \
// RUN:   -triple armv8.6a-arm-none-eabi -target-feature +neon \
// RUN:   -target-feature +bf16 -mfloat-abi hard \
// RUN:   -disable-O0-optnone -emit-llvm -o - %s \
// RUN:   | opt -S -passes=mem2reg,sroa \
// RUN:   | FileCheck --check-prefixes=CHECK,CHECK-A32-HARDFP %s
// RUN: %clang_cc1 \
// RUN:   -triple armv8.6a-arm-none-eabi -target-feature +neon \
// RUN:   -target-feature +bf16 -mfloat-abi softfp \
// RUN:   -disable-O0-optnone -emit-llvm -o - %s \
// RUN:   | opt -S -passes=mem2reg,sroa \
// RUN:   | FileCheck --check-prefixes=CHECK,CHECK-A32-SOFTFP %s

// REQUIRES: arm-registered-target
// REQUIRES: aarch64-registered-target

#include <arm_neon.h>

// CHECK-A64-LABEL: @test_vcvt_f32_bf16(
// CHECK-A64-NEXT: entry:
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <4 x i16>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK-A64-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32>
// CHECK-A64-NEXT: [[VSHLL_N_I:%.*]] = shl <4 x i32> [[TMP3]], splat (i32 16)
// CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[VSHLL_N_I]] to <4 x float>
// CHECK-A64-NEXT: ret <4 x float> [[TMP4]]
//
// CHECK-A32-HARDFP-LABEL: @test_vcvt_f32_bf16(
// CHECK-A32-HARDFP-NEXT: entry:
// CHECK-A32-HARDFP-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <4 x i16>
// CHECK-A32-HARDFP-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8>
// CHECK-A32-HARDFP-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK-A32-HARDFP-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32>
// CHECK-A32-HARDFP-NEXT: [[VSHLL_N_I:%.*]] = shl <4 x i32> [[TMP3]], splat (i32 16)
// CHECK-A32-HARDFP-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[VSHLL_N_I]] to <4 x float>
// CHECK-A32-HARDFP-NEXT: ret <4 x float> [[TMP4]]
//
// CHECK-A32-SOFTFP-LABEL: @test_vcvt_f32_bf16(
// CHECK-A32-SOFTFP-NEXT: entry:
// CHECK-A32-SOFTFP-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A_COERCE:%.*]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP1:%.*]] = bitcast <4 x bfloat> [[TMP0]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP1]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP3:%.*]] = bitcast <4 x bfloat> [[TMP2]] to <4 x i16>
// CHECK-A32-SOFTFP-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8>
// CHECK-A32-SOFTFP-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16>
// CHECK-A32-SOFTFP-NEXT: [[TMP6:%.*]] = zext <4 x i16> [[TMP5]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[VSHLL_N_I:%.*]] = shl <4 x i32> [[TMP6]], splat (i32 16)
// CHECK-A32-SOFTFP-NEXT: [[TMP7:%.*]] = bitcast <4 x i32> [[VSHLL_N_I]] to <4 x float>
// CHECK-A32-SOFTFP-NEXT: ret <4 x float> [[TMP7]]
//
float32x4_t test_vcvt_f32_bf16(bfloat16x4_t a) {
  return vcvt_f32_bf16(a);
}

// CHECK-A64-LABEL: @test_vcvtq_low_f32_bf16(
// CHECK-A64-NEXT: entry:
// CHECK-A64-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[A:%.*]], <8 x bfloat> [[A]], <4 x i32>
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[SHUFFLE_I]] to <4 x i16>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK-A64-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32>
// CHECK-A64-NEXT: [[VSHLL_N_I_I:%.*]] = shl <4 x i32> [[TMP3]], splat (i32 16)
// CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[VSHLL_N_I_I]] to <4 x float>
// CHECK-A64-NEXT: ret <4 x float> [[TMP4]]
//
// CHECK-A32-HARDFP-LABEL: @test_vcvtq_low_f32_bf16(
// CHECK-A32-HARDFP-NEXT: entry:
// CHECK-A32-HARDFP-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[A:%.*]], <8 x bfloat> [[A]], <4 x i32>
// CHECK-A32-HARDFP-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[SHUFFLE_I]] to <4 x i16>
// CHECK-A32-HARDFP-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8>
// CHECK-A32-HARDFP-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK-A32-HARDFP-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32>
// CHECK-A32-HARDFP-NEXT: [[VSHLL_N_I_I:%.*]] = shl <4 x i32> [[TMP3]], splat (i32 16)
// CHECK-A32-HARDFP-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[VSHLL_N_I_I]] to <4 x float>
// CHECK-A32-HARDFP-NEXT: ret <4 x float> [[TMP4]]
//
// CHECK-A32-SOFTFP-LABEL: @test_vcvtq_low_f32_bf16(
// CHECK-A32-SOFTFP-NEXT: entry:
// CHECK-A32-SOFTFP-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A_COERCE:%.*]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP1:%.*]] = bitcast <8 x bfloat> [[TMP0]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP3:%.*]] = bitcast <8 x bfloat> [[TMP2]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[TMP4]], <8 x bfloat> [[TMP4]], <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP5:%.*]] = bitcast <4 x bfloat> [[SHUFFLE_I]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP5]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP7:%.*]] = bitcast <4 x bfloat> [[TMP6]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP8:%.*]] = bitcast <2 x i32> [[TMP7]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP9:%.*]] = bitcast <4 x bfloat> [[TMP8]] to <4 x i16>
// CHECK-A32-SOFTFP-NEXT: [[TMP10:%.*]] = bitcast <4 x i16> [[TMP9]] to <8 x i8>
// CHECK-A32-SOFTFP-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16>
// CHECK-A32-SOFTFP-NEXT: [[TMP12:%.*]] = zext <4 x i16> [[TMP11]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[VSHLL_N_I_I:%.*]] = shl <4 x i32> [[TMP12]], splat (i32 16)
// CHECK-A32-SOFTFP-NEXT: [[TMP13:%.*]] = bitcast <4 x i32> [[VSHLL_N_I_I]] to <4 x float>
// CHECK-A32-SOFTFP-NEXT: ret <4 x float> [[TMP13]]
//
float32x4_t test_vcvtq_low_f32_bf16(bfloat16x8_t a) {
  return vcvtq_low_f32_bf16(a);
}

// CHECK-A64-LABEL: @test_vcvtq_high_f32_bf16(
// CHECK-A64-NEXT: entry:
// CHECK-A64-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[A:%.*]], <8 x bfloat> [[A]], <4 x i32>
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[SHUFFLE_I]] to <4 x i16>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK-A64-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32>
// CHECK-A64-NEXT: [[VSHLL_N_I_I:%.*]] = shl <4 x i32> [[TMP3]], splat (i32 16)
// CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[VSHLL_N_I_I]] to <4 x float>
// CHECK-A64-NEXT: ret <4 x float> [[TMP4]]
//
// CHECK-A32-HARDFP-LABEL: @test_vcvtq_high_f32_bf16(
// CHECK-A32-HARDFP-NEXT: entry:
// CHECK-A32-HARDFP-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[A:%.*]], <8 x bfloat> [[A]], <4 x i32>
// CHECK-A32-HARDFP-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[SHUFFLE_I]] to <4 x i16>
// CHECK-A32-HARDFP-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8>
// CHECK-A32-HARDFP-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK-A32-HARDFP-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32>
// CHECK-A32-HARDFP-NEXT: [[VSHLL_N_I_I:%.*]] = shl <4 x i32> [[TMP3]], splat (i32 16)
// CHECK-A32-HARDFP-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[VSHLL_N_I_I]] to <4 x float>
// CHECK-A32-HARDFP-NEXT: ret <4 x float> [[TMP4]]
//
// CHECK-A32-SOFTFP-LABEL: @test_vcvtq_high_f32_bf16(
// CHECK-A32-SOFTFP-NEXT: entry:
// CHECK-A32-SOFTFP-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A_COERCE:%.*]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP1:%.*]] = bitcast <8 x bfloat> [[TMP0]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP3:%.*]] = bitcast <8 x bfloat> [[TMP2]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[TMP4]], <8 x bfloat> [[TMP4]], <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP5:%.*]] = bitcast <4 x bfloat> [[SHUFFLE_I]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP5]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP7:%.*]] = bitcast <4 x bfloat> [[TMP6]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP8:%.*]] = bitcast <2 x i32> [[TMP7]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP9:%.*]] = bitcast <4 x bfloat> [[TMP8]] to <4 x i16>
// CHECK-A32-SOFTFP-NEXT: [[TMP10:%.*]] = bitcast <4 x i16> [[TMP9]] to <8 x i8>
// CHECK-A32-SOFTFP-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16>
// CHECK-A32-SOFTFP-NEXT: [[TMP12:%.*]] = zext <4 x i16> [[TMP11]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[VSHLL_N_I_I:%.*]] = shl <4 x i32> [[TMP12]], splat (i32 16)
// CHECK-A32-SOFTFP-NEXT: [[TMP13:%.*]] = bitcast <4 x i32> [[VSHLL_N_I_I]] to <4 x float>
// CHECK-A32-SOFTFP-NEXT: ret <4 x float> [[TMP13]]
//
float32x4_t test_vcvtq_high_f32_bf16(bfloat16x8_t a) {
  return vcvtq_high_f32_bf16(a);
}

// CHECK-A64-LABEL: @test_vcvt_bf16_f32(
// CHECK-A64-NEXT: entry:
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-A64-NEXT: [[TMP3:%.*]] = fptrunc <4 x float> [[TMP2]] to <4 x bfloat>
// CHECK-A64-NEXT: ret <4 x bfloat> [[TMP3]]
//
// CHECK-A32-HARDFP-LABEL: @test_vcvt_bf16_f32(
// CHECK-A32-HARDFP-NEXT: entry:
// CHECK-A32-HARDFP-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-A32-HARDFP-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-A32-HARDFP-NEXT: [[VCVTFP2BF_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-A32-HARDFP-NEXT: [[VCVTFP2BF1_I:%.*]] = call <4 x bfloat> @llvm.arm.neon.vcvtfp2bf.v4bf16(<4 x float> [[VCVTFP2BF_I]])
// CHECK-A32-HARDFP-NEXT: ret <4 x bfloat> [[VCVTFP2BF1_I]]
//
// CHECK-A32-SOFTFP-LABEL: @test_vcvt_bf16_f32(
// CHECK-A32-SOFTFP-NEXT: entry:
// CHECK-A32-SOFTFP-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-A32-SOFTFP-NEXT: [[VCVTFP2BF_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-A32-SOFTFP-NEXT: [[VCVTFP2BF1_I:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtfp2bf.v4i16(<4 x float> [[VCVTFP2BF_I]])
// CHECK-A32-SOFTFP-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[VCVTFP2BF1_I]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP3:%.*]] = bitcast <4 x bfloat> [[TMP2]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP3]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP5:%.*]] = bitcast <4 x bfloat> [[TMP4]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP5]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP7:%.*]] = bitcast <4 x bfloat> [[TMP6]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: ret <2 x i32> [[TMP7]]
//
bfloat16x4_t test_vcvt_bf16_f32(float32x4_t a) {
  return vcvt_bf16_f32(a);
}

// CHECK-A64-LABEL: @test_vcvtq_low_bf16_f32(
// CHECK-A64-NEXT: entry:
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-A64-NEXT: [[TMP3:%.*]] = fptrunc <4 x float> [[TMP2]] to <4 x bfloat>
// CHECK-A64-NEXT: [[TMP4:%.*]] = shufflevector <4 x bfloat> [[TMP3]], <4 x bfloat> zeroinitializer, <8 x i32>
// CHECK-A64-NEXT: ret <8 x bfloat> [[TMP4]]
//
// CHECK-A32-HARDFP-LABEL: @test_vcvtq_low_bf16_f32(
// CHECK-A32-HARDFP-NEXT: entry:
// CHECK-A32-HARDFP-NEXT: [[TMP0:%.*]] = bitcast i64 0 to <4 x bfloat>
// CHECK-A32-HARDFP-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-A32-HARDFP-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
// CHECK-A32-HARDFP-NEXT: [[VCVTFP2BF_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float>
// CHECK-A32-HARDFP-NEXT: [[VCVTFP2BF1_I:%.*]] = call <4 x bfloat> @llvm.arm.neon.vcvtfp2bf.v4bf16(<4 x float> [[VCVTFP2BF_I]])
// CHECK-A32-HARDFP-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x bfloat> [[TMP0]], <4 x bfloat> [[VCVTFP2BF1_I]], <8 x i32>
// CHECK-A32-HARDFP-NEXT: ret <8 x bfloat> [[SHUFFLE_I]]
//
// CHECK-A32-SOFTFP-LABEL: @test_vcvtq_low_bf16_f32(
// CHECK-A32-SOFTFP-NEXT: entry:
// CHECK-A32-SOFTFP-NEXT: [[TMP0:%.*]] = bitcast i64 0 to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
// CHECK-A32-SOFTFP-NEXT: [[VCVTFP2BF_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float>
// CHECK-A32-SOFTFP-NEXT: [[VCVTFP2BF1_I:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtfp2bf.v4i16(<4 x float> [[VCVTFP2BF_I]])
// CHECK-A32-SOFTFP-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[VCVTFP2BF1_I]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP4:%.*]] = bitcast <4 x bfloat> [[TMP3]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP4]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP6:%.*]] = bitcast <4 x bfloat> [[TMP0]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP7:%.*]] = bitcast <4 x bfloat> [[TMP5]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP8:%.*]] = bitcast <2 x i32> [[TMP6]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP9:%.*]] = bitcast <2 x i32> [[TMP7]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x bfloat> [[TMP8]], <4 x bfloat> [[TMP9]], <8 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP10:%.*]] = bitcast <8 x bfloat> [[SHUFFLE_I]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP12:%.*]] = bitcast <8 x bfloat> [[TMP11]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP14:%.*]] = bitcast <8 x bfloat> [[TMP13]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: ret <4 x i32> [[TMP14]]
//
bfloat16x8_t test_vcvtq_low_bf16_f32(float32x4_t a) {
  return vcvtq_low_bf16_f32(a);
}

// CHECK-A64-LABEL: @test_vcvtq_high_bf16_f32(
// CHECK-A64-NEXT: entry:
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[INACTIVE:%.*]] to <8 x i16>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[TMP0]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x bfloat>
// CHECK-A64-NEXT: [[TMP5:%.*]] = shufflevector <8 x bfloat> [[TMP4]], <8 x bfloat> poison, <4 x i32>
// CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float>
// CHECK-A64-NEXT: [[TMP7:%.*]] = fptrunc <4 x float> [[TMP6]] to <4 x bfloat>
// CHECK-A64-NEXT: [[TMP8:%.*]] = shufflevector <4 x bfloat> [[TMP5]], <4 x bfloat> [[TMP7]], <8 x i32>
// CHECK-A64-NEXT: ret <8 x bfloat> [[TMP8]]
//
// CHECK-A32-HARDFP-LABEL: @test_vcvtq_high_bf16_f32(
// CHECK-A32-HARDFP-NEXT: entry:
// CHECK-A32-HARDFP-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-A32-HARDFP-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-A32-HARDFP-NEXT: [[VCVTFP2BF_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-A32-HARDFP-NEXT: [[VCVTFP2BF1_I:%.*]] = call <4 x bfloat> @llvm.arm.neon.vcvtfp2bf.v4bf16(<4 x float> [[VCVTFP2BF_I]])
// CHECK-A32-HARDFP-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[INACTIVE:%.*]], <8 x bfloat> [[INACTIVE]], <4 x i32>
// CHECK-A32-HARDFP-NEXT: [[SHUFFLE_I8:%.*]] = shufflevector <4 x bfloat> [[VCVTFP2BF1_I]], <4 x bfloat> [[SHUFFLE_I]], <8 x i32>
// CHECK-A32-HARDFP-NEXT: ret <8 x bfloat> [[SHUFFLE_I8]]
//
// CHECK-A32-SOFTFP-LABEL: @test_vcvtq_high_bf16_f32(
// CHECK-A32-SOFTFP-NEXT: entry:
// CHECK-A32-SOFTFP-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[INACTIVE_COERCE:%.*]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP1:%.*]] = bitcast <8 x bfloat> [[TMP0]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP3:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8>
// CHECK-A32-SOFTFP-NEXT: [[VCVTFP2BF_I:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x float>
// CHECK-A32-SOFTFP-NEXT: [[VCVTFP2BF1_I:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtfp2bf.v4i16(<4 x float> [[VCVTFP2BF_I]])
// CHECK-A32-SOFTFP-NEXT: [[TMP5:%.*]] = bitcast <4 x i16> [[VCVTFP2BF1_I]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP6:%.*]] = bitcast <4 x bfloat> [[TMP5]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP7:%.*]] = bitcast <2 x i32> [[TMP6]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP8:%.*]] = bitcast <8 x bfloat> [[TMP2]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP8]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[TMP9]], <8 x bfloat> [[TMP9]], <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP10:%.*]] = bitcast <4 x bfloat> [[SHUFFLE_I]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP12:%.*]] = bitcast <4 x bfloat> [[TMP7]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP13:%.*]] = bitcast <4 x bfloat> [[TMP11]] to <2 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP14:%.*]] = bitcast <2 x i32> [[TMP12]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP15:%.*]] = bitcast <2 x i32> [[TMP13]] to <4 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[SHUFFLE_I17:%.*]] = shufflevector <4 x bfloat> [[TMP14]], <4 x bfloat> [[TMP15]], <8 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP16:%.*]] = bitcast <8 x bfloat> [[SHUFFLE_I17]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP17:%.*]] = bitcast <4 x i32> [[TMP16]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP18:%.*]] = bitcast <8 x bfloat> [[TMP17]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: [[TMP19:%.*]] = bitcast <4 x i32> [[TMP18]] to <8 x bfloat>
// CHECK-A32-SOFTFP-NEXT: [[TMP20:%.*]] = bitcast <8 x bfloat> [[TMP19]] to <4 x i32>
// CHECK-A32-SOFTFP-NEXT: ret <4 x i32> [[TMP20]]
//
bfloat16x8_t test_vcvtq_high_bf16_f32(bfloat16x8_t inactive, float32x4_t a) {
  return vcvtq_high_bf16_f32(inactive, a);
}

// CHECK-A64-LABEL: @test_vcvth_bf16_f32(
// CHECK-A64-NEXT: entry:
// CHECK-A64-NEXT: [[TMP0:%.*]] = fptrunc float [[A:%.*]] to bfloat
// CHECK-A64-NEXT: ret bfloat [[TMP0]]
//
// CHECK-A32-HARDFP-LABEL: @test_vcvth_bf16_f32(
// CHECK-A32-HARDFP-NEXT: entry:
// CHECK-A32-HARDFP-NEXT: [[VCVTBFP2BF_I:%.*]] = call bfloat @llvm.arm.neon.vcvtbfp2bf(float [[A:%.*]])
// CHECK-A32-HARDFP-NEXT: ret bfloat [[VCVTBFP2BF_I]]
//
// CHECK-A32-SOFTFP-LABEL: @test_vcvth_bf16_f32(
// CHECK-A32-SOFTFP-NEXT: entry:
// CHECK-A32-SOFTFP-NEXT: [[VCVTBFP2BF_I:%.*]] = call bfloat @llvm.arm.neon.vcvtbfp2bf(float [[A:%.*]])
// CHECK-A32-SOFTFP-NEXT: ret bfloat [[VCVTBFP2BF_I]]
//
bfloat16_t test_vcvth_bf16_f32(float32_t a) {
  return vcvth_bf16_f32(a);
}

// CHECK-LABEL: @test_vcvtah_f32_bf16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast bfloat [[A:%.*]] to i16
// CHECK-NEXT: [[CONV_I:%.*]] = zext i16 [[TMP0]] to i32
// CHECK-NEXT: [[SHL_I:%.*]] = shl i32 [[CONV_I]], 16
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[SHL_I]] to float
// CHECK-NEXT: ret float [[TMP1]]
//
float32_t test_vcvtah_f32_bf16(bfloat16_t a) {
  return vcvtah_f32_bf16(a);
}