author    Stefan Brankovic <stefan.brankovic@rt-rk.com>  2019-10-04 15:43:59 +0200
committer David Gibson <david@gibson.dropbear.id.au>     2019-10-24 09:36:55 +1100
commit    8d745875c28528a30155bfa0ca992e2202d08b96 (patch)
tree      1585981a1fec2d8f6fa3e81ccc7e32a73767dfe0 /target
parent    e6144bf912a69b747be43f490a815871dca4f1ed (diff)
target/ppc: Fix for optimized vsl/vsr instructions
In the previous implementation, the invocation of a TCG shift function could request a shift of a TCG variable by 64 bits when the variable 'sh' is 0, which is not supported by TCG (values can be shifted by 0 to 63 bits). This patch fixes this by using two separate invocations of TCG shift functions, each with a maximum shift amount of 32.

The variable 'shifted' is renamed to 'carry' so that the variable naming matches the old helper implementation. The variables 'avrA' and 'avrB' are replaced with a single variable 'avr'.

Fixes: 4e6d0920e7547e6af4bbac5ffe9adfe6ea621822
Reported-by: "Paul A. Clark" <pc@us.ibm.com>
Reported-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Suggested-by: Aleksandar Markovic <aleksandar.markovic@rt-rk.com>
Signed-off-by: Stefan Brankovic <stefan.brankovic@rt-rk.com>
Message-Id: <1570196639-7025-2-git-send-email-stefan.brankovic@rt-rk.com>
Tested-by: Paul A. Clarke <pc@us.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
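To see the problem concretely, here is a minimal standalone C sketch (not QEMU code; the helper name carry_bits is invented for illustration). It mirrors the patch's two-step trick: the bits carried out of the low doubleword are computed with two shifts of at most 32, so sh == 0 never produces a shift by 64, which is undefined in C just as it is unsupported in TCG:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * The buggy form was 'lo >> (64 - sh)', which is undefined
     * when sh == 0.  Splitting it into two shifts keeps every
     * shift amount in the legal 0..63 range for any sh in 0..7.
     */
    static uint64_t carry_bits(uint64_t lo, uint64_t sh)
    {
        return (lo >> 32) >> (32 - sh);   /* both shifts <= 32 */
    }

    int main(void)
    {
        uint64_t lo = 0xfedcba9876543210ULL;
        for (uint64_t sh = 0; sh < 8; sh++) {
            printf("sh=%llu carry=%016llx\n",
                   (unsigned long long)sh,
                   (unsigned long long)carry_bits(lo, sh));
        }
        return 0;
    }

For sh == 0 the two-step form naturally yields a carry of 0, which matches the semantics of shifting a 128-bit value by zero.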
Diffstat (limited to 'target')
-rw-r--r--  target/ppc/translate/vmx-impl.inc.c | 84
1 file changed, 40 insertions(+), 44 deletions(-)
diff --git a/target/ppc/translate/vmx-impl.inc.c b/target/ppc/translate/vmx-impl.inc.c
index 2472a52..81d5a7a 100644
--- a/target/ppc/translate/vmx-impl.inc.c
+++ b/target/ppc/translate/vmx-impl.inc.c
@@ -590,40 +590,38 @@ static void trans_vsl(DisasContext *ctx)
int VT = rD(ctx->opcode);
int VA = rA(ctx->opcode);
int VB = rB(ctx->opcode);
- TCGv_i64 avrA = tcg_temp_new_i64();
- TCGv_i64 avrB = tcg_temp_new_i64();
+ TCGv_i64 avr = tcg_temp_new_i64();
TCGv_i64 sh = tcg_temp_new_i64();
- TCGv_i64 shifted = tcg_temp_new_i64();
+ TCGv_i64 carry = tcg_temp_new_i64();
TCGv_i64 tmp = tcg_temp_new_i64();
- /* Place bits 125-127 of vB in sh. */
- get_avr64(avrB, VB, false);
- tcg_gen_andi_i64(sh, avrB, 0x07ULL);
+ /* Place bits 125-127 of vB in 'sh'. */
+ get_avr64(avr, VB, false);
+ tcg_gen_andi_i64(sh, avr, 0x07ULL);
/*
- * Save highest sh bits of lower doubleword element of vA in variable
- * shifted and perform shift on lower doubleword.
+ * Save highest 'sh' bits of lower doubleword element of vA in variable
+ * 'carry' and perform shift on lower doubleword.
*/
- get_avr64(avrA, VA, false);
- tcg_gen_subfi_i64(tmp, 64, sh);
- tcg_gen_shr_i64(shifted, avrA, tmp);
- tcg_gen_andi_i64(shifted, shifted, 0x7fULL);
- tcg_gen_shl_i64(avrA, avrA, sh);
- set_avr64(VT, avrA, false);
+ get_avr64(avr, VA, false);
+ tcg_gen_subfi_i64(tmp, 32, sh);
+ tcg_gen_shri_i64(carry, avr, 32);
+ tcg_gen_shr_i64(carry, carry, tmp);
+ tcg_gen_shl_i64(avr, avr, sh);
+ set_avr64(VT, avr, false);
/*
* Perform shift on higher doubleword element of vA and replace lowest
- * sh bits with shifted.
+ * 'sh' bits with 'carry'.
*/
- get_avr64(avrA, VA, true);
- tcg_gen_shl_i64(avrA, avrA, sh);
- tcg_gen_or_i64(avrA, avrA, shifted);
- set_avr64(VT, avrA, true);
+ get_avr64(avr, VA, true);
+ tcg_gen_shl_i64(avr, avr, sh);
+ tcg_gen_or_i64(avr, avr, carry);
+ set_avr64(VT, avr, true);
- tcg_temp_free_i64(avrA);
- tcg_temp_free_i64(avrB);
+ tcg_temp_free_i64(avr);
tcg_temp_free_i64(sh);
- tcg_temp_free_i64(shifted);
+ tcg_temp_free_i64(carry);
tcg_temp_free_i64(tmp);
}
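Read as plain C, the rewritten trans_vsl moves data between the two doublewords as in the sketch below. This is a hedged model, not QEMU code: it assumes get_avr64/set_avr64 simply read and write the 64-bit halves of a vector register, and the function name vsl128 is invented here:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the new trans_vsl data flow: shift a 128-bit
     * value (hi:lo) left by sh (0..7) bits. */
    static void vsl128(uint64_t *hi, uint64_t *lo, unsigned sh)
    {
        /* Bits shifted out of the low doubleword, computed with
         * two shifts of at most 32 so sh == 0 stays legal. */
        uint64_t carry = (*lo >> 32) >> (32 - sh);

        *lo = *lo << sh;
        *hi = (*hi << sh) | carry;
    }

    int main(void)
    {
        uint64_t hi = 0x0123456789abcdefULL;
        uint64_t lo = 0xfedcba9876543210ULL;
        vsl128(&hi, &lo, 3);
        printf("hi=%016llx lo=%016llx\n",
               (unsigned long long)hi, (unsigned long long)lo);
        return 0;
    }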
@@ -639,39 +637,37 @@ static void trans_vsr(DisasContext *ctx)
int VT = rD(ctx->opcode);
int VA = rA(ctx->opcode);
int VB = rB(ctx->opcode);
- TCGv_i64 avrA = tcg_temp_new_i64();
- TCGv_i64 avrB = tcg_temp_new_i64();
+ TCGv_i64 avr = tcg_temp_new_i64();
TCGv_i64 sh = tcg_temp_new_i64();
- TCGv_i64 shifted = tcg_temp_new_i64();
+ TCGv_i64 carry = tcg_temp_new_i64();
TCGv_i64 tmp = tcg_temp_new_i64();
- /* Place bits 125-127 of vB in sh. */
- get_avr64(avrB, VB, false);
- tcg_gen_andi_i64(sh, avrB, 0x07ULL);
+ /* Place bits 125-127 of vB in 'sh'. */
+ get_avr64(avr, VB, false);
+ tcg_gen_andi_i64(sh, avr, 0x07ULL);
/*
- * Save lowest sh bits of higher doubleword element of vA in variable
- * shifted and perform shift on higher doubleword.
+ * Save lowest 'sh' bits of higher doubleword element of vA in variable
+ * 'carry' and perform shift on higher doubleword.
*/
- get_avr64(avrA, VA, true);
- tcg_gen_subfi_i64(tmp, 64, sh);
- tcg_gen_shl_i64(shifted, avrA, tmp);
- tcg_gen_andi_i64(shifted, shifted, 0xfe00000000000000ULL);
- tcg_gen_shr_i64(avrA, avrA, sh);
- set_avr64(VT, avrA, true);
+ get_avr64(avr, VA, true);
+ tcg_gen_subfi_i64(tmp, 32, sh);
+ tcg_gen_shli_i64(carry, avr, 32);
+ tcg_gen_shl_i64(carry, carry, tmp);
+ tcg_gen_shr_i64(avr, avr, sh);
+ set_avr64(VT, avr, true);
/*
* Perform shift on lower doubleword element of vA and replace highest
- * sh bits with shifted.
+ * 'sh' bits with 'carry'.
*/
- get_avr64(avrA, VA, false);
- tcg_gen_shr_i64(avrA, avrA, sh);
- tcg_gen_or_i64(avrA, avrA, shifted);
- set_avr64(VT, avrA, false);
+ get_avr64(avr, VA, false);
+ tcg_gen_shr_i64(avr, avr, sh);
+ tcg_gen_or_i64(avr, avr, carry);
+ set_avr64(VT, avr, false);
- tcg_temp_free_i64(avrA);
- tcg_temp_free_i64(avrB);
+ tcg_temp_free_i64(avr);
tcg_temp_free_i64(sh);
- tcg_temp_free_i64(shifted);
+ tcg_temp_free_i64(carry);
tcg_temp_free_i64(tmp);
}
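As a sanity check of the mirrored trans_vsr logic, the two-step carry can be compared against a straightforward 128-bit reference. This sketch is not part of the patch and assumes a compiler with __uint128_t support (GCC/Clang):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t hi = 0x0123456789abcdefULL;
        uint64_t lo = 0xfedcba9876543210ULL;

        for (unsigned sh = 0; sh < 8; sh++) {
            __uint128_t v = ((__uint128_t)hi << 64) | lo;
            __uint128_t ref = v >> sh;

            /* Two-step form used by the patch: the lowest 'sh'
             * bits of hi move to the top of lo, and no single
             * shift ever reaches 64. */
            uint64_t carry  = (hi << 32) << (32 - sh);
            uint64_t new_hi = hi >> sh;
            uint64_t new_lo = (lo >> sh) | carry;

            assert(new_hi == (uint64_t)(ref >> 64));
            assert(new_lo == (uint64_t)ref);
        }
        return 0;
    }

Because the two partial shifts zero-fill, the mask 0xfe00000000000000ULL applied by the old code is no longer needed, which is why the patch drops it.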