From 03fc0548b70393b0c8d43703591a9e34fb8e3123 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Thu, 28 Mar 2013 05:37:51 +0000
Subject: tci: Use 32-bit signed offsets to loads/stores

Since the change to tcg_exit_req, the first insn of every TB is a load
with a negative offset from env.

Signed-off-by: Richard Henderson
Signed-off-by: Stefan Weil
---
 tci.c | 36 ++++++++++++++++++++++--------------
 1 file changed, 22 insertions(+), 14 deletions(-)

(limited to 'tci.c')

diff --git a/tci.c b/tci.c
index 2b2c11f259..9ce0be372a 100644
--- a/tci.c
+++ b/tci.c
@@ -182,7 +182,7 @@ static tcg_target_ulong tci_read_i(uint8_t **tb_ptr)
     return value;
 }
 
-/* Read constant (32 bit) from bytecode. */
+/* Read unsigned constant (32 bit) from bytecode. */
 static uint32_t tci_read_i32(uint8_t **tb_ptr)
 {
     uint32_t value = *(uint32_t *)(*tb_ptr);
@@ -190,6 +190,14 @@ static uint32_t tci_read_i32(uint8_t **tb_ptr)
     return value;
 }
 
+/* Read signed constant (32 bit) from bytecode. */
+static int32_t tci_read_s32(uint8_t **tb_ptr)
+{
+    int32_t value = *(int32_t *)(*tb_ptr);
+    *tb_ptr += sizeof(value);
+    return value;
+}
+
 #if TCG_TARGET_REG_BITS == 64
 /* Read constant (64 bit) from bytecode. */
 static uint64_t tci_read_i64(uint8_t **tb_ptr)
@@ -550,7 +558,7 @@ tcg_target_ulong tcg_qemu_tb_exec(CPUArchState *cpustate, uint8_t *tb_ptr)
         case INDEX_op_ld8u_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             tci_write_reg8(t0, *(uint8_t *)(t1 + t2));
             break;
         case INDEX_op_ld8s_i32:
@@ -563,25 +571,25 @@ tcg_target_ulong tcg_qemu_tb_exec(CPUArchState *cpustate, uint8_t *tb_ptr)
         case INDEX_op_ld_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             tci_write_reg32(t0, *(uint32_t *)(t1 + t2));
             break;
         case INDEX_op_st8_i32:
             t0 = tci_read_r8(&tb_ptr);
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             *(uint8_t *)(t1 + t2) = t0;
             break;
         case INDEX_op_st16_i32:
             t0 = tci_read_r16(&tb_ptr);
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             *(uint16_t *)(t1 + t2) = t0;
             break;
         case INDEX_op_st_i32:
             t0 = tci_read_r32(&tb_ptr);
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             *(uint32_t *)(t1 + t2) = t0;
             break;
 
@@ -818,7 +826,7 @@ tcg_target_ulong tcg_qemu_tb_exec(CPUArchState *cpustate, uint8_t *tb_ptr)
         case INDEX_op_ld8u_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             tci_write_reg8(t0, *(uint8_t *)(t1 + t2));
             break;
         case INDEX_op_ld8s_i64:
@@ -829,43 +837,43 @@ tcg_target_ulong tcg_qemu_tb_exec(CPUArchState *cpustate, uint8_t *tb_ptr)
         case INDEX_op_ld32u_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             tci_write_reg32(t0, *(uint32_t *)(t1 + t2));
             break;
         case INDEX_op_ld32s_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             tci_write_reg32s(t0, *(int32_t *)(t1 + t2));
             break;
         case INDEX_op_ld_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             tci_write_reg64(t0, *(uint64_t *)(t1 + t2));
             break;
         case INDEX_op_st8_i64:
             t0 = tci_read_r8(&tb_ptr);
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             *(uint8_t *)(t1 + t2) = t0;
             break;
         case INDEX_op_st16_i64:
             t0 = tci_read_r16(&tb_ptr);
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             *(uint16_t *)(t1 + t2) = t0;
             break;
         case INDEX_op_st32_i64:
             t0 = tci_read_r32(&tb_ptr);
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             *(uint32_t *)(t1 + t2) = t0;
             break;
         case INDEX_op_st_i64:
             t0 = tci_read_r64(&tb_ptr);
             t1 = tci_read_r(&tb_ptr);
-            t2 = tci_read_i32(&tb_ptr);
+            t2 = tci_read_s32(&tb_ptr);
             *(uint64_t *)(t1 + t2) = t0;
             break;
 
--
cgit v1.2.1
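
For context (not part of the patch itself): the stand-alone sketch below illustrates why the load/store offset has to be read back as a signed 32-bit value on a 64-bit host. The base address and the -16 displacement are made-up numbers; the point is only that zero-extending a negative offset before adding it to the base register, as the old tci_read_i32() path effectively did, produces the wrong address, while sign-extending it, as tci_read_s32() does, recovers the intended one.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical base register value (think of the env pointer) and a
       negative displacement, like the one used by the load that every TB
       now starts with since the tcg_exit_req change. */
    uint64_t base = 0x00007f0000001000ULL;
    int32_t  disp = -16;

    /* The bytecode stream stores the offset as 32 raw bits. */
    uint32_t raw = (uint32_t)disp;

    uint64_t zero_extended = base + raw;           /* old behaviour: base + 0xfffffff0 */
    uint64_t sign_extended = base + (int32_t)raw;  /* new behaviour: base - 16 */

    printf("zero-extended: 0x%016" PRIx64 "\n", zero_extended);
    printf("sign-extended: 0x%016" PRIx64 "\n", sign_extended);
    return 0;
}

On a 32-bit host the two additions wrap to the same result, which is why reading the offset unsigned used to go unnoticed there; only the 64-bit interpreter computes a bogus address.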