author     Richard Henderson <rth@twiddle.net>    2014-03-18 08:44:05 -0700
committer  Richard Henderson <rth@twiddle.net>    2014-04-18 16:57:36 -0700
commit     1976cccec8a9965ff3fd6f026783a04f6b4959fd (patch)
tree       5c9a19840b4caf88c32051ee6729d2acdf55bcf9 /tci.c
parent     50c5c4d12557ede48c573e5138542061acd83500 (diff)
tci: Mask shift counts to avoid undefined behavior
TCG now requires an unspecified result rather than a potential crash
for out-of-range shift counts; mask the counts to bring the C shifts
within the letter of the law.
Signed-off-by: Richard Henderson <rth@twiddle.net>
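
For context (an illustrative sketch, not part of the commit): in C, shifting
a 32-bit value by 32 or more, or a 64-bit value by 64 or more, is undefined
behavior, whereas TCG only needs an unspecified result. Masking the count to
the operand width, as the patch does, keeps every shift well defined. The
helper names below are hypothetical:

#include <stdint.h>

/* Hypothetical helpers showing the pattern applied in the patch.
 * Masking clamps the count to 0..31 (or 0..63), so the C shift is
 * always defined even if the guest supplies an out-of-range count. */
static inline uint32_t shl32_masked(uint32_t val, uint32_t count)
{
    return val << (count & 31);     /* never shifts by >= 32 */
}

static inline uint64_t shl64_masked(uint64_t val, uint64_t count)
{
    return val << (count & 63);     /* never shifts by >= 64 */
}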
Diffstat (limited to 'tci.c')
-rw-r--r--  tci.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
@@ -669,32 +669,32 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, t1 << t2);
+            tci_write_reg32(t0, t1 << (t2 & 31));
             break;
         case INDEX_op_shr_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, t1 >> t2);
+            tci_write_reg32(t0, t1 >> (t2 & 31));
             break;
         case INDEX_op_sar_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, ((int32_t)t1 >> t2));
+            tci_write_reg32(t0, ((int32_t)t1 >> (t2 & 31)));
             break;
 #if TCG_TARGET_HAS_rot_i32
         case INDEX_op_rotl_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, rol32(t1, t2));
+            tci_write_reg32(t0, rol32(t1, t2 & 31));
             break;
         case INDEX_op_rotr_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, ror32(t1, t2));
+            tci_write_reg32(t0, ror32(t1, t2 & 31));
             break;
 #endif
 #if TCG_TARGET_HAS_deposit_i32
@@ -936,32 +936,32 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(&tb_ptr);
             t2 = tci_read_ri64(&tb_ptr);
-            tci_write_reg64(t0, t1 << t2);
+            tci_write_reg64(t0, t1 << (t2 & 63));
             break;
         case INDEX_op_shr_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(&tb_ptr);
             t2 = tci_read_ri64(&tb_ptr);
-            tci_write_reg64(t0, t1 >> t2);
+            tci_write_reg64(t0, t1 >> (t2 & 63));
             break;
         case INDEX_op_sar_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(&tb_ptr);
             t2 = tci_read_ri64(&tb_ptr);
-            tci_write_reg64(t0, ((int64_t)t1 >> t2));
+            tci_write_reg64(t0, ((int64_t)t1 >> (t2 & 63)));
             break;
 #if TCG_TARGET_HAS_rot_i64
         case INDEX_op_rotl_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(&tb_ptr);
             t2 = tci_read_ri64(&tb_ptr);
-            tci_write_reg64(t0, rol64(t1, t2));
+            tci_write_reg64(t0, rol64(t1, t2 & 63));
             break;
         case INDEX_op_rotr_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(&tb_ptr);
             t2 = tci_read_ri64(&tb_ptr);
-            tci_write_reg64(t0, ror64(t1, t2));
+            tci_write_reg64(t0, ror64(t1, t2 & 63));
             break;
 #endif
 #if TCG_TARGET_HAS_deposit_i64
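
A side note on the rotate cases (a sketch under assumptions, not QEMU's
actual bitops.h code): a rotate built from two shifts must also keep the
complementary count in range, which the usual masked idiom handles even
when the count is 0. rol32_sketch below is a hypothetical name:

#include <stdint.h>

/* Hypothetical rotate-left, assuming rol32-like semantics: both shifts
 * stay within 0..31 because (-shift & 31) == (32 - shift) % 32, so a
 * count of 0 does not produce the undefined "val >> 32". */
static inline uint32_t rol32_sketch(uint32_t val, unsigned int shift)
{
    shift &= 31;
    return (val << shift) | (val >> (-shift & 31));
}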