author     Lucas Mateus Castro (alqotel) <lucas.araujo@eldorado.org.br>  2022-10-19 09:50:37 -0300
committer  Daniel Henrique Barboza <danielhb413@gmail.com>  2022-10-28 13:15:22 -0300
commit     95a89d3118590aaed3a56b52e08246cdb51a108e
tree       584a154e493524b0fd26040e1f54588fcb8df1c1  /target/ppc/translate
parent     a5b36805192ac12f114c45e9c8ccbe4ce9ba1cf9
target/ppc: Use gvec to decode XVCPSGN[SD]P
Moved XVCPSGNSP and XVCPSGNDP to decodetree and used gvec to translate
them.
xvcpsgnsp:
rept   loop    master       patch
   8   12500   0.00561400   0.00537900   (-4.2%)
  25    4000   0.00562100   0.00400000   (-28.8%)
 100    1000   0.00696900   0.00416300   (-40.3%)
 500     200   0.02211900   0.00840700   (-62.0%)
2500      40   0.09328600   0.02728300   (-70.8%)
8000      12   0.27295300   0.06867800   (-74.8%)
xvcpsgndp:
rept   loop    master       patch
   8   12500   0.00556300   0.00584200   (+5.0%)
  25    4000   0.00482700   0.00431700   (-10.6%)
 100    1000   0.00585800   0.00464400   (-20.7%)
 500     200   0.01565300   0.00839700   (-46.4%)
2500      40   0.05766500   0.02430600   (-57.8%)
8000      12   0.19875300   0.07947100   (-60.0%)
As with the previous instructions, there seems to be an improvement in
translation time.
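
The benchmark program itself is not included in the commit; the snippet below
is a hypothetical reconstruction of the kind of rept/loop harness the tables
suggest, in which "rept" copies of the instruction sit inside a loop executed
"loop" times, so larger rept values put more weight on translation than on
execution. The register numbers, the LOOP value, and the build/run procedure
(cross-compile for ppc64le, time it under qemu built from master vs. the
patched tree) are assumptions, not part of this commit.

/*
 * Hypothetical rept/loop microbenchmark (not shipped with this commit),
 * shown for the rept=8, loop=12500 row of the xvcpsgndp table.
 * xvcpsgndp is a pure bit operation, so the source VSRs may hold garbage;
 * only the destination (vs34, which aliases v2) needs to be clobbered.
 */
#define LOOP 12500

int main(void)
{
    for (long i = 0; i < LOOP; i++) {
        __asm__ __volatile__(".rept 8\n\t"              /* rept = 8 copies */
                             "xvcpsgndp 34, 35, 36\n\t" /* vs34 = sign of vs35, magnitude of vs36 */
                             ".endr"
                             ::: "v2");
    }
    return 0;
}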
Signed-off-by: Lucas Mateus Castro (alqotel) <lucas.araujo@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20221019125040.48028-10-lucas.araujo@eldorado.org.br>
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Diffstat (limited to 'target/ppc/translate')
-rw-r--r--  target/ppc/translate/vsx-impl.c.inc | 109
-rw-r--r--  target/ppc/translate/vsx-ops.c.inc  |   3
2 files changed, 53 insertions, 59 deletions
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
index 8717e20..1c28923 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -729,62 +729,6 @@ VSX_SCALAR_MOVE_QP(xsnabsqp, OP_NABS, SGN_MASK_DP)
 VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP)
 VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP)
 
-#define VSX_VECTOR_MOVE(name, op, sgn_mask) \
-static void glue(gen_, name)(DisasContext *ctx) \
-    { \
-        TCGv_i64 xbh, xbl, sgm; \
-        if (unlikely(!ctx->vsx_enabled)) { \
-            gen_exception(ctx, POWERPC_EXCP_VSXU); \
-            return; \
-        } \
-        xbh = tcg_temp_new_i64(); \
-        xbl = tcg_temp_new_i64(); \
-        sgm = tcg_temp_new_i64(); \
-        get_cpu_vsr(xbh, xB(ctx->opcode), true); \
-        get_cpu_vsr(xbl, xB(ctx->opcode), false); \
-        tcg_gen_movi_i64(sgm, sgn_mask); \
-        switch (op) { \
-        case OP_ABS: { \
-            tcg_gen_andc_i64(xbh, xbh, sgm); \
-            tcg_gen_andc_i64(xbl, xbl, sgm); \
-            break; \
-        } \
-        case OP_NABS: { \
-            tcg_gen_or_i64(xbh, xbh, sgm); \
-            tcg_gen_or_i64(xbl, xbl, sgm); \
-            break; \
-        } \
-        case OP_NEG: { \
-            tcg_gen_xor_i64(xbh, xbh, sgm); \
-            tcg_gen_xor_i64(xbl, xbl, sgm); \
-            break; \
-        } \
-        case OP_CPSGN: { \
-            TCGv_i64 xah = tcg_temp_new_i64(); \
-            TCGv_i64 xal = tcg_temp_new_i64(); \
-            get_cpu_vsr(xah, xA(ctx->opcode), true); \
-            get_cpu_vsr(xal, xA(ctx->opcode), false); \
-            tcg_gen_and_i64(xah, xah, sgm); \
-            tcg_gen_and_i64(xal, xal, sgm); \
-            tcg_gen_andc_i64(xbh, xbh, sgm); \
-            tcg_gen_andc_i64(xbl, xbl, sgm); \
-            tcg_gen_or_i64(xbh, xbh, xah); \
-            tcg_gen_or_i64(xbl, xbl, xal); \
-            tcg_temp_free_i64(xah); \
-            tcg_temp_free_i64(xal); \
-            break; \
-        } \
-        } \
-        set_cpu_vsr(xT(ctx->opcode), xbh, true); \
-        set_cpu_vsr(xT(ctx->opcode), xbl, false); \
-        tcg_temp_free_i64(xbh); \
-        tcg_temp_free_i64(xbl); \
-        tcg_temp_free_i64(sgm); \
-    }
-
-VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
-VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
-
 #define TCG_OP_IMM_i64(FUNC, OP, IMM) \
 static void FUNC(TCGv_i64 t, TCGv_i64 b) \
 { \
@@ -852,6 +796,59 @@ TRANS(XVABSSP, do_vsx_msb_op, MO_32, do_xvabs_vec, do_xvabssp_i64)
 TRANS(XVNABSSP, do_vsx_msb_op, MO_32, do_xvnabs_vec, do_xvnabssp_i64)
 TRANS(XVNEGSP, do_vsx_msb_op, MO_32, do_xvneg_vec, do_xvnegsp_i64)
 
+static void do_xvcpsgndp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+    tcg_gen_andi_i64(a, a, SGN_MASK_DP);
+    tcg_gen_andi_i64(b, b, ~SGN_MASK_DP);
+    tcg_gen_or_i64(t, a, b);
+}
+
+static void do_xvcpsgnsp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+    tcg_gen_andi_i64(a, a, SGN_MASK_SP);
+    tcg_gen_andi_i64(b, b, ~SGN_MASK_SP);
+    tcg_gen_or_i64(t, a, b);
+}
+
+static void do_xvcpsgn_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+    uint64_t msb = (vece == MO_32) ? SGN_MASK_SP : SGN_MASK_DP;
+    tcg_gen_bitsel_vec(vece, t, tcg_constant_vec_matching(t, vece, msb), a, b);
+}
+
+static bool do_xvcpsgn(DisasContext *ctx, arg_XX3 *a, unsigned vece)
+{
+    static const TCGOpcode vecop_list[] = {
+        0
+    };
+
+    static const GVecGen3 op[] = {
+        {
+            .fni8 = do_xvcpsgnsp_i64,
+            .fniv = do_xvcpsgn_vec,
+            .opt_opc = vecop_list,
+            .vece = MO_32
+        },
+        {
+            .fni8 = do_xvcpsgndp_i64,
+            .fniv = do_xvcpsgn_vec,
+            .opt_opc = vecop_list,
+            .vece = MO_64
+        },
+    };
+
+    REQUIRE_INSNS_FLAGS2(ctx, VSX);
+    REQUIRE_VSX(ctx);
+
+    tcg_gen_gvec_3(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
+                   vsr_full_offset(a->xb), 16, 16, &op[vece - MO_32]);
+
+    return true;
+}
+
+TRANS(XVCPSGNSP, do_xvcpsgn, MO_32)
+TRANS(XVCPSGNDP, do_xvcpsgn, MO_64)
+
 #define VSX_CMP(name, op1, op2, inval, type) \
 static void gen_##name(DisasContext *ctx) \
 { \
diff --git a/target/ppc/translate/vsx-ops.c.inc b/target/ppc/translate/vsx-ops.c.inc
index b77324e..f7d7377 100644
--- a/target/ppc/translate/vsx-ops.c.inc
+++ b/target/ppc/translate/vsx-ops.c.inc
@@ -165,9 +165,6 @@ GEN_XX3FORM(name, opc2, opc3 | 1, fl2)
 GEN_XX2FORM_DCMX(xvtstdcdp, 0x14, 0x1E, PPC2_ISA300),
 GEN_XX2FORM_DCMX(xvtstdcsp, 0x14, 0x1A, PPC2_ISA300),
 
-GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX),
-GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX),
-
 GEN_XX3FORM(xsadddp, 0x00, 0x04, PPC2_VSX),
 GEN_VSX_XFORM_300(xsaddqp, 0x04, 0x00, 0x0),
 GEN_XX3FORM(xssubdp, 0x00, 0x05, PPC2_VSX),
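
As a side note on what the new expansion computes: both the i64 fallbacks
(do_xvcpsgndp_i64 / do_xvcpsgnsp_i64) and the vector path (tcg_gen_bitsel_vec
with the sign mask as selector) implement, per element, "sign bit from XA,
exponent and mantissa from XB". The standalone C sketch below mirrors that bit
operation for the double-precision case; the helper name cpsgn_dp and the demo
values are illustrative only, not QEMU code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SGN_MASK_DP 0x8000000000000000ull  /* sign bit of a binary64 element */

/* Per-element equivalent of do_xvcpsgndp_i64 / the MO_64 bitsel expansion:
 * take the sign bit from a, the exponent and mantissa from b. */
static uint64_t cpsgn_dp(uint64_t a, uint64_t b)
{
    return (a & SGN_MASK_DP) | (b & ~SGN_MASK_DP);
}

int main(void)
{
    double sign_src = -1.0, mag_src = 2.5, out;
    uint64_t a, b, t;

    memcpy(&a, &sign_src, sizeof(a));
    memcpy(&b, &mag_src, sizeof(b));
    t = cpsgn_dp(a, b);
    memcpy(&out, &t, sizeof(out));
    printf("%f\n", out);   /* prints -2.500000 */
    return 0;
}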