aboutsummaryrefslogtreecommitdiff
path: root/tcg
diff options
context:
space:
mode:
authorRichard Henderson <richard.henderson@linaro.org>2024-12-08 19:57:28 -0600
committerRichard Henderson <richard.henderson@linaro.org>2024-12-24 08:32:14 -0800
commitc7739ab83e02b93cb15f54984c3f66ba3c5bd8d2 (patch)
tree00f0ffbe1abe3152658125cf21cf41ffd7a70f0f /tcg
parent81be07f905b187743b69adeb2877e5a9efc00d8e (diff)
downloadqemu-c7739ab83e02b93cb15f54984c3f66ba3c5bd8d2.zip
qemu-c7739ab83e02b93cb15f54984c3f66ba3c5bd8d2.tar.gz
qemu-c7739ab83e02b93cb15f54984c3f66ba3c5bd8d2.tar.bz2
tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
Avoid the use of the OptContext slots. Find TempOptInfo once. When we fold to an 'and' opcode, use fold_and. Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'tcg')
-rw-r--r--tcg/optimize.c35
1 file changed, 17 insertions, 18 deletions
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 2f5030c..c0f0390 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1625,14 +1625,17 @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
+ TempOptInfo *t1 = arg_info(op->args[1]);
+ TempOptInfo *t2 = arg_info(op->args[2]);
+ int ofs = op->args[3];
+ int len = op->args[4];
TCGOpcode and_opc;
+ uint64_t z_mask;
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- uint64_t t1 = arg_info(op->args[1])->val;
- uint64_t t2 = arg_info(op->args[2])->val;
-
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+ if (ti_is_const(t1) && ti_is_const(t2)) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
+ deposit64(ti_const_val(t1), ofs, len,
+ ti_const_val(t2)));
}
switch (ctx->type) {
@@ -1647,30 +1650,26 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
}
/* Inserting a value into zero at offset 0. */
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
op->opc = and_opc;
op->args[1] = op->args[2];
op->args[2] = arg_new_constant(ctx, mask);
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
- return false;
+ return fold_and(ctx, op);
}
/* Inserting zero into a value. */
- if (arg_is_const_val(op->args[2], 0)) {
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
+ if (ti_is_const_val(t2, 0)) {
+ uint64_t mask = deposit64(-1, ofs, len, 0);
op->opc = and_opc;
op->args[2] = arg_new_constant(ctx, mask);
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
- return false;
+ return fold_and(ctx, op);
}
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
- op->args[3], op->args[4],
- arg_info(op->args[2])->z_mask);
- return false;
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
+ return fold_masks_z(ctx, op, z_mask);
}
static bool fold_divide(OptContext *ctx, TCGOp *op)