diff --git a/qemu/tcg/optimize.c b/qemu/tcg/optimize.c
index 7d29e757..fbe6c61c 100644
--- a/qemu/tcg/optimize.c
+++ b/qemu/tcg/optimize.c
@@ -348,9 +348,11 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
     CASE_OP_32_64(ext16u):
         return (uint16_t)x;
 
+    case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
         return (int32_t)x;
 
+    case INDEX_op_extu_i32_i64:
     case INDEX_op_ext32u_i64:
         return (uint32_t)x;
 
@@ -849,6 +851,15 @@ void tcg_optimize(TCGContext *s)
             mask = temps[args[1]].mask & mask;
             break;
 
+        case INDEX_op_ext_i32_i64:
+            if ((temps[args[1]].mask & 0x80000000) != 0) {
+                break;
+            }
+        case INDEX_op_extu_i32_i64:
+            /* We do not compute affected as it is a size changing op.  */
+            mask = (uint32_t)temps[args[1]].mask;
+            break;
+
         CASE_OP_32_64(andc):
             /* Known-zeros does not imply known-ones.  Therefore unless
                args[2] is constant, we can't infer anything from it.  */
@@ -1027,6 +1038,8 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(ext16u):
         case INDEX_op_ext32s_i64:
         case INDEX_op_ext32u_i64:
+        case INDEX_op_ext_i32_i64:
+        case INDEX_op_extu_i32_i64:
             if (temp_is_const(s, args[1])) {
                 tmp = do_constant_folding(s, opc, temps[args[1]].val, 0);
                 tcg_opt_gen_movi(s, op, args, args[0], tmp);
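
The first and third hunks simply add the two size-changing extension ops to the
existing constant-folding paths. The second hunk is the subtle one:
INDEX_op_ext_i32_i64 intentionally falls through to INDEX_op_extu_i32_i64. If
bit 31 of the source may be set, sign extension can propagate ones into bits
63..32, so the optimizer breaks out early and keeps the default all-unknown
mask; otherwise sign and zero extension produce the same known-zero high half.
The standalone C sketch below illustrates that mask rule outside of TCG;
fold_ext_mask and the test harness are illustrative names, not part of the
patch (in QEMU the state lives in temps[].mask, where a 0 bit means "known
zero").

/* Illustration only: known-nonzero-bits propagation for the i32->i64
   extension ops, mirroring the logic added to tcg_optimize(). */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* in_mask has a 1 for every bit of the 32-bit source that may be nonzero.
   Returns the corresponding mask for the 64-bit result. */
static uint64_t fold_ext_mask(uint64_t in_mask, bool sign_extend)
{
    if (sign_extend && (in_mask & 0x80000000u) != 0) {
        /* Bit 31 may be set: sign extension may fill bits 63..32 with
           ones, so nothing is known about the result (the "break" in the
           patch, which leaves mask at its default of all ones). */
        return ~UINT64_C(0);
    }
    /* High half known zero, exactly as in the patch:
       mask = (uint32_t)temps[args[1]].mask; */
    return (uint32_t)in_mask;
}

int main(void)
{
    /* Source fits in 16 bits: both extensions keep bits 63..16 known zero. */
    printf("extu, 16-bit source: %#018" PRIx64 "\n",
           fold_ext_mask(0xffff, false));
    printf("ext,  16-bit source: %#018" PRIx64 "\n",
           fold_ext_mask(0xffff, true));

    /* Bit 31 may be set: zero extension still clears the high half,
       sign extension yields an all-unknown mask. */
    printf("extu, full source:   %#018" PRIx64 "\n",
           fold_ext_mask(0xffffffff, false));
    printf("ext,  full source:   %#018" PRIx64 "\n",
           fold_ext_mask(0xffffffff, true));
    return 0;
}

Run as written, the sketch prints 0x000000000000ffff for both 16-bit cases,
0x00000000ffffffff for the zero extension of a full 32-bit source, and
0xffffffffffffffff for the sign extension of a full 32-bit source, matching
the case analysis the patch performs.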