author     Peter Maydell <peter.maydell@linaro.org>  2019-10-24 16:22:58 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2019-10-24 16:22:58 +0100
commit     58560ad254fbda71d4daa6622d71683190070ee2 (patch)
tree       b5fc3eb6758fc2ccbaab4506c8e11b27c0493353 /target
parent     81c1f71eeb874c4cbbb9c5c4d1a1dc0ba7391dff (diff)
parent     97c00c54449b4ff349f85c6ce409dadd1b935a7d (diff)
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.2-20191024' into staging
ppc patch queue 2019-10-24

Last pull request before soft freeze.

 * Lots of fixes and cleanups for spapr interrupt controllers
 * More SLOF updates to fix problems with full FDT rendering at CAS time
   (alas, more yet are to come)
 * A few other assorted changes

This isn't quite as well tested as I usually try to do before a pull
request.  But I've been sick and running into some other difficulties,
and wanted to get this sent out before heading towards KVM forum.

# gpg: Signature made Thu 24 Oct 2019 09:14:31 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-4.2-20191024: (28 commits)
  spapr/xive: Set the OS CAM line at reset
  ppc/pnv: Fix naming of routines realizing the CPUs
  ppc: Reset the interrupt presenter from the CPU reset handler
  ppc/pnv: Add a PnvChip pointer to PnvCore
  ppc/pnv: Introduce a PnvCore reset handler
  spapr_cpu_core: Implement DeviceClass::reset
  spapr: move CPU reset after presenter creation
  spapr: Don't request to unplug the same core twice
  pseries: Update SLOF firmware image
  spapr: Move SpaprIrq::nr_xirqs to SpaprMachineClass
  spapr: Remove SpaprIrq::nr_msis
  spapr, xics, xive: Move SpaprIrq::post_load hook to backends
  spapr, xics, xive: Move SpaprIrq::reset hook logic into activate/deactivate
  spapr: Remove SpaprIrq::init_kvm hook
  spapr, xics, xive: Match signatures for XICS and XIVE KVM connect routines
  spapr, xics, xive: Move dt_populate from SpaprIrq to SpaprInterruptController
  spapr, xics, xive: Move print_info from SpaprIrq to SpaprInterruptController
  spapr, xics, xive: Move set_irq from SpaprIrq to SpaprInterruptController
  spapr: Formalize notion of active interrupt controller
  spapr, xics, xive: Move irq claim and free from SpaprIrq to SpaprInterruptController
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--  target/ppc/translate/vmx-impl.inc.c   84
1 file changed, 40 insertions, 44 deletions
diff --git a/target/ppc/translate/vmx-impl.inc.c b/target/ppc/translate/vmx-impl.inc.c
index 2472a52..81d5a7a 100644
--- a/target/ppc/translate/vmx-impl.inc.c
+++ b/target/ppc/translate/vmx-impl.inc.c
@@ -590,40 +590,38 @@ static void trans_vsl(DisasContext *ctx)
int VT = rD(ctx->opcode);
int VA = rA(ctx->opcode);
int VB = rB(ctx->opcode);
- TCGv_i64 avrA = tcg_temp_new_i64();
- TCGv_i64 avrB = tcg_temp_new_i64();
+ TCGv_i64 avr = tcg_temp_new_i64();
TCGv_i64 sh = tcg_temp_new_i64();
- TCGv_i64 shifted = tcg_temp_new_i64();
+ TCGv_i64 carry = tcg_temp_new_i64();
TCGv_i64 tmp = tcg_temp_new_i64();
- /* Place bits 125-127 of vB in sh. */
- get_avr64(avrB, VB, false);
- tcg_gen_andi_i64(sh, avrB, 0x07ULL);
+ /* Place bits 125-127 of vB in 'sh'. */
+ get_avr64(avr, VB, false);
+ tcg_gen_andi_i64(sh, avr, 0x07ULL);
/*
- * Save highest sh bits of lower doubleword element of vA in variable
- * shifted and perform shift on lower doubleword.
+ * Save highest 'sh' bits of lower doubleword element of vA in variable
+ * 'carry' and perform shift on lower doubleword.
*/
- get_avr64(avrA, VA, false);
- tcg_gen_subfi_i64(tmp, 64, sh);
- tcg_gen_shr_i64(shifted, avrA, tmp);
- tcg_gen_andi_i64(shifted, shifted, 0x7fULL);
- tcg_gen_shl_i64(avrA, avrA, sh);
- set_avr64(VT, avrA, false);
+ get_avr64(avr, VA, false);
+ tcg_gen_subfi_i64(tmp, 32, sh);
+ tcg_gen_shri_i64(carry, avr, 32);
+ tcg_gen_shr_i64(carry, carry, tmp);
+ tcg_gen_shl_i64(avr, avr, sh);
+ set_avr64(VT, avr, false);
/*
* Perform shift on higher doubleword element of vA and replace lowest
- * sh bits with shifted.
+ * 'sh' bits with 'carry'.
*/
- get_avr64(avrA, VA, true);
- tcg_gen_shl_i64(avrA, avrA, sh);
- tcg_gen_or_i64(avrA, avrA, shifted);
- set_avr64(VT, avrA, true);
+ get_avr64(avr, VA, true);
+ tcg_gen_shl_i64(avr, avr, sh);
+ tcg_gen_or_i64(avr, avr, carry);
+ set_avr64(VT, avr, true);
- tcg_temp_free_i64(avrA);
- tcg_temp_free_i64(avrB);
+ tcg_temp_free_i64(avr);
tcg_temp_free_i64(sh);
- tcg_temp_free_i64(shifted);
+ tcg_temp_free_i64(carry);
tcg_temp_free_i64(tmp);
}
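
The hunk above changes how trans_vsl() computes the bits carried from the lower doubleword of vA into the higher one: the old sequence shifted right by (64 - sh), which becomes a shift count of 64 when sh is 0 (presumably the motivation for the change), while the new sequence splits it into two shifts whose counts stay within 0..32. A minimal standalone sketch of the same idea on plain uint64_t halves, illustrative only and not taken from the patch (shl128, hi and lo are made-up names):

/*
 * Illustrative sketch: 128-bit left shift by sh (0..7) over two 64-bit
 * doublewords, mirroring the TCG sequence in the hunk above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void shl128(uint64_t *hi, uint64_t *lo, unsigned sh)
{
    /*
     * Top 'sh' bits of the lower doubleword, to be carried into the
     * higher one.  Splitting the shift as (lo >> 32) >> (32 - sh) keeps
     * every shift count in 0..32, so sh == 0 stays well defined, whereas
     * lo >> (64 - sh) would shift by 64 in that case.
     */
    uint64_t carry = (*lo >> 32) >> (32 - sh);

    *lo = *lo << sh;
    *hi = (*hi << sh) | carry;
}

int main(void)
{
    uint64_t hi = 0x0123456789abcdefULL, lo = 0xfedcba9876543210ULL;

    shl128(&hi, &lo, 3);
    printf("%016" PRIx64 "%016" PRIx64 "\n", hi, lo);
    shl128(&hi, &lo, 0);   /* the previously problematic sh == 0 case */
    printf("%016" PRIx64 "%016" PRIx64 "\n", hi, lo);
    return 0;
}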
@@ -639,39 +637,37 @@ static void trans_vsr(DisasContext *ctx)
int VT = rD(ctx->opcode);
int VA = rA(ctx->opcode);
int VB = rB(ctx->opcode);
- TCGv_i64 avrA = tcg_temp_new_i64();
- TCGv_i64 avrB = tcg_temp_new_i64();
+ TCGv_i64 avr = tcg_temp_new_i64();
TCGv_i64 sh = tcg_temp_new_i64();
- TCGv_i64 shifted = tcg_temp_new_i64();
+ TCGv_i64 carry = tcg_temp_new_i64();
TCGv_i64 tmp = tcg_temp_new_i64();
- /* Place bits 125-127 of vB in sh. */
- get_avr64(avrB, VB, false);
- tcg_gen_andi_i64(sh, avrB, 0x07ULL);
+ /* Place bits 125-127 of vB in 'sh'. */
+ get_avr64(avr, VB, false);
+ tcg_gen_andi_i64(sh, avr, 0x07ULL);
/*
- * Save lowest sh bits of higher doubleword element of vA in variable
- * shifted and perform shift on higher doubleword.
+ * Save lowest 'sh' bits of higher doubleword element of vA in variable
+ * 'carry' and perform shift on higher doubleword.
*/
- get_avr64(avrA, VA, true);
- tcg_gen_subfi_i64(tmp, 64, sh);
- tcg_gen_shl_i64(shifted, avrA, tmp);
- tcg_gen_andi_i64(shifted, shifted, 0xfe00000000000000ULL);
- tcg_gen_shr_i64(avrA, avrA, sh);
- set_avr64(VT, avrA, true);
+ get_avr64(avr, VA, true);
+ tcg_gen_subfi_i64(tmp, 32, sh);
+ tcg_gen_shli_i64(carry, avr, 32);
+ tcg_gen_shl_i64(carry, carry, tmp);
+ tcg_gen_shr_i64(avr, avr, sh);
+ set_avr64(VT, avr, true);
/*
* Perform shift on lower doubleword element of vA and replace highest
- * sh bits with shifted.
+ * 'sh' bits with 'carry'.
*/
- get_avr64(avrA, VA, false);
- tcg_gen_shr_i64(avrA, avrA, sh);
- tcg_gen_or_i64(avrA, avrA, shifted);
- set_avr64(VT, avrA, false);
+ get_avr64(avr, VA, false);
+ tcg_gen_shr_i64(avr, avr, sh);
+ tcg_gen_or_i64(avr, avr, carry);
+ set_avr64(VT, avr, false);
- tcg_temp_free_i64(avrA);
- tcg_temp_free_i64(avrB);
+ tcg_temp_free_i64(avr);
tcg_temp_free_i64(sh);
- tcg_temp_free_i64(shifted);
+ tcg_temp_free_i64(carry);
tcg_temp_free_i64(tmp);
}
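
trans_vsr() is the mirror image: the carried bits are the lowest 'sh' bits of the higher doubleword, moved to the top of the lower one, and the same two-step split keeps both shift counts in 0..32 instead of shifting by (64 - sh). A corresponding sketch, again illustrative only (shr128, hi and lo are made-up names):

/* Illustrative sketch: 128-bit right shift by sh (0..7). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void shr128(uint64_t *hi, uint64_t *lo, unsigned sh)
{
    /*
     * Lowest 'sh' bits of the higher doubleword, positioned at the top of
     * the lower one.  (hi << 32) << (32 - sh) parallels the
     * tcg_gen_shli_i64/tcg_gen_shl_i64 pair in the hunk above.
     */
    uint64_t carry = (*hi << 32) << (32 - sh);

    *hi = *hi >> sh;
    *lo = (*lo >> sh) | carry;
}

int main(void)
{
    uint64_t hi = 0x0123456789abcdefULL, lo = 0xfedcba9876543210ULL;

    shr128(&hi, &lo, 5);
    printf("%016" PRIx64 "%016" PRIx64 "\n", hi, lo);
    shr128(&hi, &lo, 0);   /* sh == 0 is well defined here too */
    printf("%016" PRIx64 "%016" PRIx64 "\n", hi, lo);
    return 0;
}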