author     Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>   2016-09-29 00:11:55 +0530
committer  David Gibson <david@gibson.dropbear.id.au>      2016-10-05 11:05:28 +1100
commit     f34001ec96ebc126aad49db4e3e01391b28ed264 (patch)
tree       25979f25d0e494a1cc5124c0bc04a81447f7e15e /target-ppc
parent     b9731075b3c6417eca2bc14612688046c4b7f9e6 (diff)
target-ppc: improve lxvw4x implementation
Load 8 bytes at a time and manipulate.

Big-Endian Storage
+-------------+-------------+-------------+-------------+
| 00 11 22 33 | 44 55 66 77 | 88 99 AA BB | CC DD EE FF |
+-------------+-------------+-------------+-------------+

Little-Endian Storage
+-------------+-------------+-------------+-------------+
| 33 22 11 00 | 77 66 55 44 | BB AA 99 88 | FF EE DD CC |
+-------------+-------------+-------------+-------------+

Vector load results in (32-bit elements):
+----------+----------+----------+----------+
| 00112233 | 44556677 | 8899AABB | CCDDEEFF |
+----------+----------+----------+----------+

Signed-off-by: Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>
Reviewed-by: Richard Henderson <rth@twiddle.net>
[dwg: Slight tweak to commit description]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
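The little-endian path in the patch below builds each 64-bit doubleword by loading 8 bytes with MO_LEQ and then swapping its two 32-bit halves (tcg_gen_shri_i64 followed by tcg_gen_deposit_i64). A minimal standalone C sketch of that word swap, with a hypothetical helper name, is:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: exchange the two 32-bit halves of a 64-bit value,
 * mirroring what the shri/deposit pair generates for the LE case. */
static uint64_t swap_words(uint64_t v)
{
    uint64_t hi = v >> 32;        /* like tcg_gen_shri_i64(t1, t0, 32) */
    return (v << 32) | hi;        /* like tcg_gen_deposit_i64(xth, t1, t0, 32, 32) */
}

int main(void)
{
    /* The first 8 bytes of the little-endian storage above are
     * 33 22 11 00 77 66 55 44; an LE 64-bit load yields 0x4455667700112233,
     * and swapping the word halves gives the first two vector elements,
     * 00112233 and 44556677, in the expected order. */
    uint64_t loaded = 0x4455667700112233ULL;
    printf("%016" PRIx64 "\n", swap_words(loaded));
    return 0;
}

(In big-endian mode no manipulation is needed: the two MO_BEQ loads already yield 0x0011223344556677 and 0x8899AABBCCDDEEFF.)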
Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/translate/vsx-impl.inc.c  32
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/target-ppc/translate/vsx-impl.inc.c b/target-ppc/translate/vsx-impl.inc.c
index c4c50dd..ec871b2 100644
--- a/target-ppc/translate/vsx-impl.inc.c
+++ b/target-ppc/translate/vsx-impl.inc.c
@@ -75,7 +75,6 @@ static void gen_lxvdsx(DisasContext *ctx)
 static void gen_lxvw4x(DisasContext *ctx)
 {
     TCGv EA;
-    TCGv_i64 tmp;
     TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
     TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
     if (unlikely(!ctx->vsx_enabled)) {
@@ -84,22 +83,27 @@ static void gen_lxvw4x(DisasContext *ctx)
     }
     gen_set_access_type(ctx, ACCESS_INT);
     EA = tcg_temp_new();
-    tmp = tcg_temp_new_i64();
     gen_addr_reg_index(ctx, EA);
-    gen_qemu_ld32u_i64(ctx, tmp, EA);
-    tcg_gen_addi_tl(EA, EA, 4);
-    gen_qemu_ld32u_i64(ctx, xth, EA);
-    tcg_gen_deposit_i64(xth, xth, tmp, 32, 32);
-
-    tcg_gen_addi_tl(EA, EA, 4);
-    gen_qemu_ld32u_i64(ctx, tmp, EA);
-    tcg_gen_addi_tl(EA, EA, 4);
-    gen_qemu_ld32u_i64(ctx, xtl, EA);
-    tcg_gen_deposit_i64(xtl, xtl, tmp, 32, 32);
-
+    if (ctx->le_mode) {
+        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t1 = tcg_temp_new_i64();
+
+        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+        tcg_gen_shri_i64(t1, t0, 32);
+        tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
+        tcg_gen_addi_tl(EA, EA, 8);
+        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+        tcg_gen_shri_i64(t1, t0, 32);
+        tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
+        tcg_temp_free_i64(t0);
+        tcg_temp_free_i64(t1);
+    } else {
+        tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+        tcg_gen_addi_tl(EA, EA, 8);
+        tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+    }
     tcg_temp_free(EA);
-    tcg_temp_free_i64(tmp);
 }
 
 #define VSX_STORE_SCALAR(name, operation) \