@@ -2650,30 +2650,32 @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
 
 static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask = -1, s_mask = 0;
+
     /* We can't do any folding with a load, but we can record bits. */
     switch (op->opc) {
     CASE_OP_32_64(ld8s):
-        ctx->s_mask = MAKE_64BIT_MASK(8, 56);
+        s_mask = INT8_MIN;
         break;
     CASE_OP_32_64(ld8u):
-        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
+        z_mask = MAKE_64BIT_MASK(0, 8);
         break;
     CASE_OP_32_64(ld16s):
-        ctx->s_mask = MAKE_64BIT_MASK(16, 48);
+        s_mask = INT16_MIN;
         break;
     CASE_OP_32_64(ld16u):
-        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
+        z_mask = MAKE_64BIT_MASK(0, 16);
         break;
     case INDEX_op_ld32s_i64:
-        ctx->s_mask = MAKE_64BIT_MASK(32, 32);
+        s_mask = INT32_MIN;
         break;
     case INDEX_op_ld32u_i64:
-        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
+        z_mask = MAKE_64BIT_MASK(0, 32);
         break;
     default:
         g_assert_not_reached();
     }
-    return false;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
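
For illustration only (not part of the patch): the sign-extending cases switch from a MAKE_64BIT_MASK() expression to a signed-minimum constant, which appears to match the updated s_mask convention in this series where the mask covers the sign bit itself as well as its replications, with the per-op masks now handed to fold_masks_zs() instead of being written into ctx directly. A minimal standalone sketch of the ld8s case, with QEMU's MAKE_64BIT_MASK macro reproduced here so it compiles outside the tree:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* QEMU's bit-mask helper, reproduced for a standalone build. */
    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))

    int main(void)
    {
        /* Old ld8s s_mask: bits 8..63, i.e. only the repetitions above the sign bit. */
        uint64_t old_s = MAKE_64BIT_MASK(8, 56);
        /* New ld8s s_mask: bits 7..63, i.e. the sign bit plus its repetitions. */
        uint64_t new_s = (uint64_t)INT8_MIN;

        printf("old: 0x%016" PRIx64 "\n", old_s); /* 0xffffffffffffff00 */
        printf("new: 0x%016" PRIx64 "\n", new_s); /* 0xffffffffffffff80 */
        return 0;
    }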