path: root/tcg/optimize.c
author     Richard Henderson <rth@twiddle.net>    2014-03-18 07:45:39 -0700
committer  Richard Henderson <rth@twiddle.net>    2014-04-18 16:57:36 -0700
commit     50c5c4d12557ede48c573e5138542061acd83500 (patch)
tree       62522d36a81c14cc46b1db93766fc4f118d1dc95 /tcg/optimize.c
parent     20022fa15f6a8ddc24a8f9d7d177312fecc7fb3a (diff)
download   qemu-50c5c4d12557ede48c573e5138542061acd83500.tar.gz
tcg: Mask shift quantities while folding
The TCG result would be undefined, but we can at least produce one
plausible result and avoid triggering the wrath of analysis tools.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
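The clamp is the same one x86 applies in hardware, where 32-bit shift counts are truncated to 5 bits. As a minimal standalone sketch (hypothetical file and function names, not part of the patch) of the undefined behavior being sidestepped:

/* sketch.c: why the folder masks shift counts. fold_shl_i32 is a
 * hypothetical stand-in for the INDEX_op_shl_i32 case in the diff. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fold_shl_i32(uint32_t x, uint64_t y)
{
    /* Shifting a 32-bit value by 32 or more is undefined in C, so
     * clamp the count to the operand width, as most hardware does. */
    return x << (y & 31);
}

int main(void)
{
    /* Unmasked, 1 << 40 is undefined for a 32-bit operand; masked,
     * the fold yields the plausible result 1 << 8 == 256. */
    printf("%u\n", fold_shl_i32(1, 40));
    return 0;
}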
Diffstat (limited to 'tcg/optimize.c')
-rw-r--r--  tcg/optimize.c | 35
1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 7777743e88..2fb708ed40 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -220,34 +220,34 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
         return x ^ y;

     case INDEX_op_shl_i32:
-        return (uint32_t)x << (uint32_t)y;
+        return (uint32_t)x << (y & 31);

     case INDEX_op_shl_i64:
-        return (uint64_t)x << (uint64_t)y;
+        return (uint64_t)x << (y & 63);

     case INDEX_op_shr_i32:
-        return (uint32_t)x >> (uint32_t)y;
+        return (uint32_t)x >> (y & 31);

     case INDEX_op_shr_i64:
-        return (uint64_t)x >> (uint64_t)y;
+        return (uint64_t)x >> (y & 63);

     case INDEX_op_sar_i32:
-        return (int32_t)x >> (int32_t)y;
+        return (int32_t)x >> (y & 31);

     case INDEX_op_sar_i64:
-        return (int64_t)x >> (int64_t)y;
+        return (int64_t)x >> (y & 63);

     case INDEX_op_rotr_i32:
-        return ror32(x, y);
+        return ror32(x, y & 31);

     case INDEX_op_rotr_i64:
-        return ror64(x, y);
+        return ror64(x, y & 63);

     case INDEX_op_rotl_i32:
-        return rol32(x, y);
+        return rol32(x, y & 31);

     case INDEX_op_rotl_i64:
-        return rol64(x, y);
+        return rol64(x, y & 63);

     CASE_OP_32_64(not):
         return ~x;
@@ -806,29 +806,34 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
         case INDEX_op_sar_i32:
             if (temps[args[2]].state == TCG_TEMP_CONST) {
-                mask = (int32_t)temps[args[1]].mask >> temps[args[2]].val;
+                tmp = temps[args[2]].val & 31;
+                mask = (int32_t)temps[args[1]].mask >> tmp;
             }
             break;

         case INDEX_op_sar_i64:
             if (temps[args[2]].state == TCG_TEMP_CONST) {
-                mask = (int64_t)temps[args[1]].mask >> temps[args[2]].val;
+                tmp = temps[args[2]].val & 63;
+                mask = (int64_t)temps[args[1]].mask >> tmp;
             }
             break;

         case INDEX_op_shr_i32:
             if (temps[args[2]].state == TCG_TEMP_CONST) {
-                mask = (uint32_t)temps[args[1]].mask >> temps[args[2]].val;
+                tmp = temps[args[2]].val & 31;
+                mask = (uint32_t)temps[args[1]].mask >> tmp;
             }
             break;

         case INDEX_op_shr_i64:
             if (temps[args[2]].state == TCG_TEMP_CONST) {
-                mask = (uint64_t)temps[args[1]].mask >> temps[args[2]].val;
+                tmp = temps[args[2]].val & 63;
+                mask = (uint64_t)temps[args[1]].mask >> tmp;
             }
             break;

         CASE_OP_32_64(shl):
             if (temps[args[2]].state == TCG_TEMP_CONST) {
-                mask = temps[args[1]].mask << temps[args[2]].val;
+                tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1);
+                mask = temps[args[1]].mask << tmp;
             }
             break;
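For the second hunk, a hedged standalone sketch (hypothetical file and function names, not QEMU code) of the known-bits bookkeeping being patched: temps[].mask tracks which bits of a value may be nonzero, and a constant shift narrows that mask by the same clamped count.

/* known_bits.c: hypothetical stand-in for the mask update in
 * tcg_constant_folding. temps[].mask tracks bits that may be nonzero;
 * a constant shr narrows it by the same clamped count. */
#include <inttypes.h>
#include <stdio.h>

static uint64_t shr_i32_result_mask(uint64_t in_mask, uint64_t count)
{
    /* Same clamp as the patched INDEX_op_shr_i32 case: without it, a
     * bogus constant count >= 32 would be undefined here as well. */
    return (uint32_t)in_mask >> (count & 31);
}

int main(void)
{
    /* A value known to fit in 8 bits (mask 0xff), shifted right by 4,
     * is known to fit in 4 bits (mask 0xf). */
    printf("%#" PRIx64 "\n", shr_i32_result_mask(0xff, 4));
    return 0;
}

The CASE_OP_32_64(shl) case clamps with TCG_TARGET_REG_BITS - 1 rather than a fixed 31 or 63 because it folds both operand widths at once; any in-range count is preserved, and an out-of-range count only needs some defined result, since the original operation was already undefined.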