Diffstat (limited to 'target-arm')
-rw-r--r--   target-arm/cpu.h        |  1
-rw-r--r--   target-arm/helper.c     | 16
-rw-r--r--   target-arm/helpers.h    |  3
-rw-r--r--   target-arm/translate.c  | 85
4 files changed, 105 insertions, 0 deletions
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 257e7aa..6c0f9d6 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -337,6 +337,7 @@ enum arm_features {
     ARM_FEATURE_THUMB2,
     ARM_FEATURE_MPU, /* Only has Memory Protection Unit, not full MMU. */
     ARM_FEATURE_VFP3,
+    ARM_FEATURE_VFP_FP16,
     ARM_FEATURE_NEON,
     ARM_FEATURE_DIV,
     ARM_FEATURE_M, /* Microcontroller profile. */
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 5e10533..cb95c6e 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -115,6 +115,7 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
         set_feature(env, ARM_FEATURE_THUMB2);
         set_feature(env, ARM_FEATURE_VFP);
         set_feature(env, ARM_FEATURE_VFP3);
+        set_feature(env, ARM_FEATURE_VFP_FP16);
         set_feature(env, ARM_FEATURE_NEON);
         set_feature(env, ARM_FEATURE_THUMB2EE);
         set_feature(env, ARM_FEATURE_DIV);
@@ -2568,6 +2569,21 @@ VFP_CONV_FIX(uh, s, float32, uint16, u)
 VFP_CONV_FIX(ul, s, float32, uint32, u)
 #undef VFP_CONV_FIX
 
+/* Half precision conversions. */
+float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
+{
+    float_status *s = &env->vfp.fp_status;
+    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
+    return float16_to_float32(a, ieee, s);
+}
+
+uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUState *env)
+{
+    float_status *s = &env->vfp.fp_status;
+    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
+    return float32_to_float16(a, ieee, s);
+}
+
 float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
 {
     float_status *s = &env->vfp.fp_status;
diff --git a/target-arm/helpers.h b/target-arm/helpers.h
index 4d07e0c..dc25f18 100644
--- a/target-arm/helpers.h
+++ b/target-arm/helpers.h
@@ -131,6 +131,9 @@ DEF_HELPER_3(vfp_sltod, f64, f64, i32, env)
 DEF_HELPER_3(vfp_uhtod, f64, f64, i32, env)
 DEF_HELPER_3(vfp_ultod, f64, f64, i32, env)
 
+DEF_HELPER_2(vfp_fcvt_f16_to_f32, f32, i32, env)
+DEF_HELPER_2(vfp_fcvt_f32_to_f16, i32, f32, env)
+
 DEF_HELPER_3(recps_f32, f32, f32, f32, env)
 DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env)
 DEF_HELPER_2(recpe_f32, f32, f32, env)
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 5784566..a002f7e 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -2974,6 +2974,47 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 case 3: /* sqrt */
                     gen_vfp_sqrt(dp);
                     break;
+                case 4: /* vcvtb.f32.f16 */
+                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
+                        return 1;
+                    tmp = gen_vfp_mrs();
+                    tcg_gen_ext16u_i32(tmp, tmp);
+                    gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
+                    dead_tmp(tmp);
+                    break;
+                case 5: /* vcvtt.f32.f16 */
+                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
+                        return 1;
+                    tmp = gen_vfp_mrs();
+                    tcg_gen_shri_i32(tmp, tmp, 16);
+                    gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
+                    dead_tmp(tmp);
+                    break;
+                case 6: /* vcvtb.f16.f32 */
+                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
+                        return 1;
+                    tmp = new_tmp();
+                    gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
+                    gen_mov_F0_vreg(0, rd);
+                    tmp2 = gen_vfp_mrs();
+                    tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
+                    tcg_gen_or_i32(tmp, tmp, tmp2);
+                    dead_tmp(tmp2);
+                    gen_vfp_msr(tmp);
+                    break;
+                case 7: /* vcvtt.f16.f32 */
+                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
+                        return 1;
+                    tmp = new_tmp();
+                    gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
+                    tcg_gen_shli_i32(tmp, tmp, 16);
+                    gen_mov_F0_vreg(0, rd);
+                    tmp2 = gen_vfp_mrs();
+                    tcg_gen_ext16u_i32(tmp2, tmp2);
+                    tcg_gen_or_i32(tmp, tmp, tmp2);
+                    dead_tmp(tmp2);
+                    gen_vfp_msr(tmp);
+                    break;
                 case 8: /* cmp */
                     gen_vfp_cmp(dp);
                     break;
@@ -5328,6 +5369,50 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     neon_store_reg64(cpu_V0, rd + pass);
                 }
                 break;
+            case 44: /* VCVT.F16.F32 */
+                if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
+                    return 1;
+                tmp = new_tmp();
+                tmp2 = new_tmp();
+                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
+                gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
+                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
+                gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
+                tcg_gen_shli_i32(tmp2, tmp2, 16);
+                tcg_gen_or_i32(tmp2, tmp2, tmp);
+                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
+                gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
+                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
+                neon_store_reg(rd, 0, tmp2);
+                tmp2 = new_tmp();
+                gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
+                tcg_gen_shli_i32(tmp2, tmp2, 16);
+                tcg_gen_or_i32(tmp2, tmp2, tmp);
+                neon_store_reg(rd, 1, tmp2);
+                dead_tmp(tmp);
+                break;
+            case 46: /* VCVT.F32.F16 */
+                if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
+                    return 1;
+                tmp3 = new_tmp();
+                tmp = neon_load_reg(rm, 0);
+                tmp2 = neon_load_reg(rm, 1);
+                tcg_gen_ext16u_i32(tmp3, tmp);
+                gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
+                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
+                tcg_gen_shri_i32(tmp3, tmp, 16);
+                gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
+                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
+                dead_tmp(tmp);
+                tcg_gen_ext16u_i32(tmp3, tmp2);
+                gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
+                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
+                tcg_gen_shri_i32(tmp3, tmp2, 16);
+                gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
+                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
+                dead_tmp(tmp2);
+                dead_tmp(tmp3);
+                break;
             default:
             elementwise:
                 for (pass = 0; pass < (q ? 4 : 2); pass++) {
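
For reference, below is a minimal standalone sketch (not part of this patch, and not QEMU's softfloat code) of the binary16-to-binary32 widening that the new vfp_fcvt_f16_to_f32 helper delegates to float16_to_float32(). The helper computes ieee from FPSCR bit 26 (AHP): with AHP clear the input is IEEE half precision, so an exponent of 0x1f encodes Inf/NaN; with AHP set, ARM's alternative half-precision format treats 0x1f as a normal exponent and has no infinities or NaNs. The function name f16_to_f32 is illustrative only, and NaN-quieting and rounding details are ignored.

/*
 * Sketch only: binary16 -> binary32 widening as performed by softfloat's
 * float16_to_float32() under the AHP selection made by the new helpers.
 * `ieee` non-zero means IEEE half precision; zero means ARM alternative
 * half precision (exponent 0x1f is a normal exponent, no Inf/NaN).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float f16_to_f32(uint16_t h, int ieee)   /* illustrative name only */
{
    uint32_t sign = (uint32_t)(h >> 15) << 31;
    uint32_t exp  = (h >> 10) & 0x1f;
    uint32_t frac = h & 0x3ff;
    uint32_t bits;

    if (exp == 0x1f && ieee) {
        /* IEEE-only case: infinity (frac == 0) or NaN (frac != 0). */
        bits = sign | 0x7f800000 | (frac << 13);
    } else if (exp == 0) {
        if (frac == 0) {
            bits = sign;                        /* signed zero */
        } else {
            /* Subnormal input: renormalize into a binary32 normal. */
            exp = 127 - 15 + 1;
            while (!(frac & 0x400)) {
                frac <<= 1;
                exp--;
            }
            bits = sign | (exp << 23) | ((frac & 0x3ff) << 13);
        }
    } else {
        /* Normal number; with AHP this also covers exponent 0x1f. */
        bits = sign | ((exp - 15 + 127) << 23) | (frac << 13);
    }

    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

int main(void)
{
    printf("%g\n", f16_to_f32(0x3c00, 1));  /* 1.0 in either format */
    printf("%g\n", f16_to_f32(0xc000, 1));  /* -2.0 */
    printf("%g\n", f16_to_f32(0x7c00, 0));  /* AHP: finite 65536, not +Inf */
    return 0;
}

The widening direction never needs rounding, which is why the translate.c code can simply zero-extend (vcvtb) or shift down (vcvtt) the selected 16-bit half of the source register before calling the helper; the narrowing helper relies on the same AHP check plus softfloat's rounding in float32_to_float16().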