aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBlue Swirl <blauwirbel@gmail.com>2010-05-09 15:40:24 +0000
committerBlue Swirl <blauwirbel@gmail.com>2010-05-09 15:40:24 +0000
commit275ea26546466446cf2ed83a93aa50e94538c203 (patch)
treea9bc1d08f34a796de32fb512127f9539a4363de5
parent72139e83a98eba2bfed2dbc2db2818fb19e47ca0 (diff)
downloadqemu-275ea26546466446cf2ed83a93aa50e94538c203.zip
qemu-275ea26546466446cf2ed83a93aa50e94538c203.tar.gz
qemu-275ea26546466446cf2ed83a93aa50e94538c203.tar.bz2
sparc: lazy C flag calculation
Calculate only the carry flag for ADDX/SUBX instead of full set of flags. Thanks to Igor Kovalenko for spotting a bug with an earlier version. Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
-rw-r--r--target-sparc/translate.c24
1 file changed, 8 insertions(+), 16 deletions(-)
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index be2a116..ea7c71b 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -334,9 +334,9 @@ static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
static inline void gen_op_addxi_cc(TCGv dst, TCGv src1, target_long src2)
{
+ gen_helper_compute_C_icc(cpu_tmp0);
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_movi_tl(cpu_cc_src2, src2);
- gen_mov_reg_C(cpu_tmp0, cpu_psr);
tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_tmp0);
tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_dst, src2);
tcg_gen_mov_tl(dst, cpu_cc_dst);
@@ -344,9 +344,9 @@ static inline void gen_op_addxi_cc(TCGv dst, TCGv src1, target_long src2)
static inline void gen_op_addx_cc(TCGv dst, TCGv src1, TCGv src2)
{
+ gen_helper_compute_C_icc(cpu_tmp0);
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
- gen_mov_reg_C(cpu_tmp0, cpu_psr);
tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_tmp0);
tcg_gen_add_tl(cpu_cc_dst, cpu_cc_dst, cpu_cc_src2);
tcg_gen_mov_tl(dst, cpu_cc_dst);
@@ -417,9 +417,9 @@ static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
static inline void gen_op_subxi_cc(TCGv dst, TCGv src1, target_long src2)
{
+ gen_helper_compute_C_icc(cpu_tmp0);
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_movi_tl(cpu_cc_src2, src2);
- gen_mov_reg_C(cpu_tmp0, cpu_psr);
tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_tmp0);
tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_dst, src2);
tcg_gen_mov_tl(dst, cpu_cc_dst);
@@ -427,9 +427,9 @@ static inline void gen_op_subxi_cc(TCGv dst, TCGv src1, target_long src2)
static inline void gen_op_subx_cc(TCGv dst, TCGv src1, TCGv src2)
{
+ gen_helper_compute_C_icc(cpu_tmp0);
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
- gen_mov_reg_C(cpu_tmp0, cpu_psr);
tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_tmp0);
tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_dst, cpu_cc_src2);
tcg_gen_mov_tl(dst, cpu_cc_dst);
@@ -2953,25 +2953,21 @@ static void disas_sparc_insn(DisasContext * dc)
if (IS_IMM) {
simm = GET_FIELDs(insn, 19, 31);
if (xop & 0x10) {
- gen_helper_compute_psr();
gen_op_addxi_cc(cpu_dst, cpu_src1, simm);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
dc->cc_op = CC_OP_ADDX;
} else {
- gen_helper_compute_psr();
- gen_mov_reg_C(cpu_tmp0, cpu_psr);
+ gen_helper_compute_C_icc(cpu_tmp0);
tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, simm);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
} else {
if (xop & 0x10) {
- gen_helper_compute_psr();
gen_op_addx_cc(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
dc->cc_op = CC_OP_ADDX;
} else {
- gen_helper_compute_psr();
- gen_mov_reg_C(cpu_tmp0, cpu_psr);
+ gen_helper_compute_C_icc(cpu_tmp0);
tcg_gen_add_tl(cpu_tmp0, cpu_src2, cpu_tmp0);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
@@ -3009,25 +3005,21 @@ static void disas_sparc_insn(DisasContext * dc)
if (IS_IMM) {
simm = GET_FIELDs(insn, 19, 31);
if (xop & 0x10) {
- gen_helper_compute_psr();
gen_op_subxi_cc(cpu_dst, cpu_src1, simm);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
dc->cc_op = CC_OP_SUBX;
} else {
- gen_helper_compute_psr();
- gen_mov_reg_C(cpu_tmp0, cpu_psr);
+ gen_helper_compute_C_icc(cpu_tmp0);
tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, simm);
tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
} else {
if (xop & 0x10) {
- gen_helper_compute_psr();
gen_op_subx_cc(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
dc->cc_op = CC_OP_SUBX;
} else {
- gen_helper_compute_psr();
- gen_mov_reg_C(cpu_tmp0, cpu_psr);
+ gen_helper_compute_C_icc(cpu_tmp0);
tcg_gen_add_tl(cpu_tmp0, cpu_src2, cpu_tmp0);
tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_tmp0);
}