commit cd9557616fe30a1926a57b516ee2ceb07faf1cb7
Author:    Peter Maydell <peter.maydell@linaro.org>  2020-07-07 21:41:08 +0100
Committer: Peter Maydell <peter.maydell@linaro.org>  2020-07-07 21:41:08 +0100
Tree:      25754275f79cbf99fc8dbf76f28b4eae549d13fd
Parent:    eb2c66b10efd2b914b56b20ae90655914310c925
Parent:    852f933e482518797f7785a2e017a215b88df815
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20200706' into staging

Fix for ppc shifts
Fix for non-parallel atomic ops

# gpg: Signature made Mon 06 Jul 2020 19:49:08 BST
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20200706:
  tcg: Fix do_nonatomic_op_* vs signed operations
  tcg/ppc: Sanitize immediate shifts

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
 tcg/ppc/tcg-target.inc.c | 15 ++++++++++-----
 tcg/tcg-op.c             | 10 ++++++----
 2 files changed, 16 insertions(+), 9 deletions(-)
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 7da6708..c8d1e76 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -2610,21 +2610,24 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_shl_i32:
if (const_args[2]) {
- tcg_out_shli32(s, args[0], args[1], args[2]);
+ /* Limit immediate shift count lest we create an illegal insn. */
+ tcg_out_shli32(s, args[0], args[1], args[2] & 31);
} else {
tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
}
break;
case INDEX_op_shr_i32:
if (const_args[2]) {
- tcg_out_shri32(s, args[0], args[1], args[2]);
+ /* Limit immediate shift count lest we create an illegal insn. */
+ tcg_out_shri32(s, args[0], args[1], args[2] & 31);
} else {
tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
}
break;
case INDEX_op_sar_i32:
if (const_args[2]) {
- tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
+ /* Limit immediate shift count lest we create an illegal insn. */
+ tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31));
} else {
tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
}
@@ -2696,14 +2699,16 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_shl_i64:
if (const_args[2]) {
- tcg_out_shli64(s, args[0], args[1], args[2]);
+ /* Limit immediate shift count lest we create an illegal insn. */
+ tcg_out_shli64(s, args[0], args[1], args[2] & 63);
} else {
tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
}
break;
case INDEX_op_shr_i64:
if (const_args[2]) {
- tcg_out_shri64(s, args[0], args[1], args[2]);
+ /* Limit immediate shift count lest we create an illegal insn. */
+ tcg_out_shri64(s, args[0], args[1], args[2] & 63);
} else {
tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
}
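
Why the masking above matters: SRAWI (and its relatives) encode the shift amount in a 5-bit SH field, so an immediate of 32 or more would spill into neighbouring instruction fields and assemble a different, possibly illegal, instruction. Below is a minimal standalone sketch, not QEMU code: the OPCD/RS/RA/SH helpers mirror the field layout used by the macros in tcg/ppc/tcg-target.inc.c but are reproduced here from the PowerPC ISA definition, and the program only demonstrates that masking keeps the encoding well-formed.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for QEMU's instruction-field macros (X-form). */
#define OPCD(x)  ((uint32_t)(x) << 26)
#define RS(r)    ((uint32_t)(r) << 21)
#define RA(r)    ((uint32_t)(r) << 16)
#define SH(n)    ((uint32_t)(n) << 11)   /* 5-bit field, bits 16..20 */
#define SRAWI    (OPCD(31) | (824 << 1)) /* extended opcode 824 */

static uint32_t emit_srawi(unsigned ra, unsigned rs, unsigned count)
{
    /* Mask the count as the patch does; without the mask, count >= 32
       would overflow the 5-bit SH field into the extended-opcode bits. */
    return SRAWI | RS(rs) | RA(ra) | SH(count & 31);
}

int main(void)
{
    uint32_t a = emit_srawi(3, 4, 1);
    uint32_t b = emit_srawi(3, 4, 33); /* 33 & 31 == 1 */
    assert(a == b);                    /* same well-formed encoding */
    printf("srawi r3,r4,1 -> 0x%08x\n", (unsigned)a);
    return 0;
}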
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index e60b74f..4b8a473 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -3189,8 +3189,9 @@ static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
memop = tcg_canonicalize_memop(memop, 0, 0);
- tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
- gen(t2, t1, val);
+ tcg_gen_qemu_ld_i32(t1, addr, idx, memop);
+ tcg_gen_ext_i32(t2, val, memop);
+ gen(t2, t1, t2);
tcg_gen_qemu_st_i32(t2, addr, idx, memop);
tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
@@ -3232,8 +3233,9 @@ static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
memop = tcg_canonicalize_memop(memop, 1, 0);
- tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
- gen(t2, t1, val);
+ tcg_gen_qemu_ld_i64(t1, addr, idx, memop);
+ tcg_gen_ext_i64(t2, val, memop);
+ gen(t2, t1, t2);
tcg_gen_qemu_st_i64(t2, addr, idx, memop);
tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
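
And why the tcg-op.c hunks matter: the old code loaded the memory value with the sign bit stripped (memop & ~MO_SIGN) and passed the raw operand straight to gen(), so a signed operation such as fetch-and-smax compared a zero-extended memory byte against an unextended register value. The following is a plain-C analogy of a hypothetical 8-bit fetch-and-smax, not the actual TCG opcodes, meant only to show the wrong result the patch prevents.

#include <stdint.h>
#include <stdio.h>

/* Old behaviour: zero-extended load, operand used as-is. */
static int32_t smax_buggy(uint8_t mem, int32_t val)
{
    uint32_t t1 = mem;                 /* memop & ~MO_SIGN: zero-extend */
    return (int32_t)t1 > val ? (int32_t)t1 : val;
}

/* Fixed behaviour: sign-extend both sides before the signed op. */
static int32_t smax_fixed(uint8_t mem, int32_t val)
{
    int32_t t1 = (int8_t)mem;          /* signed memop: sign-extend load */
    int32_t t2 = (int32_t)(int8_t)val; /* tcg_gen_ext_i32(t2, val, memop) */
    return t1 > t2 ? t1 : t2;
}

int main(void)
{
    /* Memory holds 0x80 (-128 as a signed byte); smax with 1 should be 1. */
    printf("buggy: %d\n", smax_buggy(0x80, 1)); /* prints 128: wrong */
    printf("fixed: %d\n", smax_fixed(0x80, 1)); /* prints 1: correct */
    return 0;
}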