Diffstat (limited to 'tcg')
 tcg/aarch64/tcg-target.c | 27
 tcg/i386/tcg-target.c    | 15
 tcg/ia64/tcg-target.c    |  2
 tcg/tcg.c                |  4
 tcg/tcg.h                | 23
 5 files changed, 55 insertions(+), 16 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 01ae610cd7..0ed10a9741 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -56,6 +56,11 @@ static const int tcg_target_call_oarg_regs[1] = {
#define TCG_REG_TMP TCG_REG_X30
#ifndef CONFIG_SOFTMMU
+/* Note that XZR cannot be encoded in the address base register slot,
+ as that actually encodes SP. So if we need to zero-extend the guest
+ address, via the address index register slot, we need to load even
+ a zero guest base into a register. */
+#define USE_GUEST_BASE (guest_base != 0 || TARGET_LONG_BITS == 32)
#define TCG_REG_GUEST_BASE TCG_REG_X28
#endif
@@ -1224,9 +1229,13 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
- tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
- guest_base ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
- otype, addr_reg);
+ if (USE_GUEST_BASE) {
+ tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
+ TCG_REG_GUEST_BASE, otype, addr_reg);
+ } else {
+ tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
+ addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+ }
#endif /* CONFIG_SOFTMMU */
}
@@ -1245,9 +1254,13 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE) == MO_64,
data_reg, addr_reg, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
- tcg_out_qemu_st_direct(s, memop, data_reg,
- guest_base ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
- otype, addr_reg);
+ if (USE_GUEST_BASE) {
+ tcg_out_qemu_st_direct(s, memop, data_reg,
+ TCG_REG_GUEST_BASE, otype, addr_reg);
+ } else {
+ tcg_out_qemu_st_direct(s, memop, data_reg,
+ addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+ }
#endif /* CONFIG_SOFTMMU */
}
@@ -1806,7 +1819,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
CPU_TEMP_BUF_NLONGS * sizeof(long));
#if !defined(CONFIG_SOFTMMU)
- if (guest_base) {
+ if (USE_GUEST_BASE) {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
}
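
The aarch64 change above hinges on an A64 encoding quirk: register number 31 in a load/store *base* slot means SP, so XZR cannot serve as a literal zero base, while the *index* slot does accept XZR. The following C sketch, with illustrative names rather than QEMU's, mirrors the two addressing forms the backend now chooses between:

    #include <stdint.h>

    /* Illustrative sketch, not QEMU code: the two addressing forms
       selected by USE_GUEST_BASE above. */

    /* USE_GUEST_BASE: base = X28 (holding guest_base), index = the
       guest address, zero-extended via UXTW when the guest is 32-bit,
       e.g. "ldr x0, [x28, w1, uxtw]". */
    static uintptr_t host_addr_with_base(uintptr_t guest_base_val,
                                         uint32_t guest_addr)
    {
        return guest_base_val + (uintptr_t)guest_addr;
    }

    /* !USE_GUEST_BASE (guest_base == 0 and a 64-bit guest address that
       needs no extension): base = addr_reg, index = XZR.  This is legal
       because register 31 means XZR in the index slot, even though it
       would mean SP in the base slot, e.g. "ldr x0, [x1, xzr]". */
    static uintptr_t host_addr_no_base(uintptr_t guest_addr)
    {
        return guest_addr;
    }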
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index d2adbc4d17..9187d34caf 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -1178,8 +1178,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
const TCGReg r0 = TCG_REG_L0;
const TCGReg r1 = TCG_REG_L1;
TCGType ttype = TCG_TYPE_I32;
- TCGType htype = TCG_TYPE_I32;
- int trexw = 0, hrexw = 0;
+ TCGType tlbtype = TCG_TYPE_I32;
+ int trexw = 0, hrexw = 0, tlbrexw = 0;
int s_mask = (1 << (opc & MO_SIZE)) - 1;
bool aligned = (opc & MO_AMASK) == MO_ALIGN || s_mask == 0;
@@ -1189,12 +1189,15 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
trexw = P_REXW;
}
if (TCG_TYPE_PTR == TCG_TYPE_I64) {
- htype = TCG_TYPE_I64;
hrexw = P_REXW;
+ if (TARGET_PAGE_BITS + CPU_TLB_BITS > 32) {
+ tlbtype = TCG_TYPE_I64;
+ tlbrexw = P_REXW;
+ }
}
}
- tcg_out_mov(s, htype, r0, addrlo);
+ tcg_out_mov(s, tlbtype, r0, addrlo);
if (aligned) {
tcg_out_mov(s, ttype, r1, addrlo);
} else {
@@ -1203,12 +1206,12 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask);
}
- tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
+ tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
tgen_arithi(s, ARITH_AND + trexw, r1,
TARGET_PAGE_MASK | (aligned ? s_mask : 0), 0);
- tgen_arithi(s, ARITH_AND + hrexw, r0,
+ tgen_arithi(s, ARITH_AND + tlbrexw, r0,
(CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
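
The point of the new tlbtype/tlbrexw pair in the i386 hunk above is that the TLB-index arithmetic only consumes address bits [TARGET_PAGE_BITS, TARGET_PAGE_BITS + CPU_TLB_BITS), so it can run on 32-bit registers (no REX.W prefix) whenever that upper bound stays within 32 bits, even on a 64-bit host. A sketch of the computation, with illustrative constants in place of the target's real configuration values:

    #include <stdint.h>

    /* Illustrative constants; real values come from the target's
       configuration headers. */
    #define TARGET_PAGE_BITS    12
    #define CPU_TLB_BITS        8
    #define CPU_TLB_ENTRY_BITS  5
    #define CPU_TLB_SIZE        (1 << CPU_TLB_BITS)

    /* The masked result is built from address bits
       [TARGET_PAGE_BITS, TARGET_PAGE_BITS + CPU_TLB_BITS); if that
       whole range lies in the low 32 bits, a 32-bit shift and AND
       suffice, which is what the patch exploits. */
    static uint64_t tlb_entry_offset(uint64_t addr)
    {
        uint64_t ofs = addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
        return ofs & ((uint64_t)(CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
    }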
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index 3c07017868..647e9a6f29 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -1594,7 +1594,7 @@ static void tcg_out_tb_finalize(TCGContext *s)
/* The out-of-line thunks are all the same; load the return address
from B0, load the GP, and branch to the code. Note that we are
always post-call, so the register window has rolled, so we're
- using incomming parameter register numbers, not outgoing. */
+ using incoming parameter register numbers, not outgoing. */
if (dest == NULL) {
uintptr_t *desc = (uintptr_t *)helpers[x];
uintptr_t func = desc[0], gp = desc[1], disp;
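
For context on the ia64 hunk: an ia64 indirect call target is a function descriptor, a pair of words giving the code entry point and the callee's gp, which is exactly what the desc[0]/desc[1] loads above unpack. A minimal sketch; the struct name is illustrative, since the code simply indexes a uintptr_t array:

    #include <stdint.h>

    /* Illustrative layout of the ia64 function descriptor read above. */
    struct ia64_func_desc {
        uintptr_t func;  /* desc[0]: code entry point */
        uintptr_t gp;    /* desc[1]: global pointer the callee expects */
    };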
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 0892a9bbf6..f463e44639 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1396,7 +1396,7 @@ static void tcg_liveness_analysis(TCGContext *s)
}
}
}
- /* input arguments are live for preceeding opcodes */
+ /* input arguments are live for preceding opcodes */
for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
arg = args[i];
dead_temps[arg] = 0;
@@ -1542,7 +1542,7 @@ static void tcg_liveness_analysis(TCGContext *s)
dead_args |= (1 << i);
}
}
- /* input arguments are live for preceeding opcodes */
+ /* input arguments are live for preceding opcodes */
for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
arg = args[i];
dead_temps[arg] = 0;
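
Both tcg.c hunks correct the same spelling in comments in TCG's backward liveness pass. The rule the comments state: scanning ops from last to first, an op's outputs are dead above the point that defines them, while its inputs must be live in everything that precedes it. A schematic sketch; the loop shape and accessors are illustrative, not the real tcg.c data structures:

    /* Schematic backward liveness; nb_ops, nb_oargs(), oarg(), etc.
       are illustrative accessors, not QEMU's. */
    for (int op = nb_ops - 1; op >= 0; op--) {
        /* output arguments are defined here: dead for preceding opcodes */
        for (int i = 0; i < nb_oargs(op); i++) {
            dead_temps[oarg(op, i)] = 1;
        }
        /* input arguments are live for preceding opcodes */
        for (int i = 0; i < nb_iargs(op); i++) {
            dead_temps[iarg(op, i)] = 0;
        }
    }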
diff --git a/tcg/tcg.h b/tcg/tcg.h
index aa295b9554..879a665012 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -990,25 +990,48 @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr);
+uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+
/* Temporary aliases until backends are converted. */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
# define helper_ret_lduw_mmu helper_be_lduw_mmu
# define helper_ret_ldsl_mmu helper_be_ldsl_mmu
# define helper_ret_ldul_mmu helper_be_ldul_mmu
+# define helper_ret_ldl_mmu helper_be_ldul_mmu
# define helper_ret_ldq_mmu helper_be_ldq_mmu
# define helper_ret_stw_mmu helper_be_stw_mmu
# define helper_ret_stl_mmu helper_be_stl_mmu
# define helper_ret_stq_mmu helper_be_stq_mmu
+# define helper_ret_ldw_cmmu helper_be_ldw_cmmu
+# define helper_ret_ldl_cmmu helper_be_ldl_cmmu
+# define helper_ret_ldq_cmmu helper_be_ldq_cmmu
#else
# define helper_ret_ldsw_mmu helper_le_ldsw_mmu
# define helper_ret_lduw_mmu helper_le_lduw_mmu
# define helper_ret_ldsl_mmu helper_le_ldsl_mmu
# define helper_ret_ldul_mmu helper_le_ldul_mmu
+# define helper_ret_ldl_mmu helper_le_ldul_mmu
# define helper_ret_ldq_mmu helper_le_ldq_mmu
# define helper_ret_stw_mmu helper_le_stw_mmu
# define helper_ret_stl_mmu helper_le_stl_mmu
# define helper_ret_stq_mmu helper_le_stq_mmu
+# define helper_ret_ldw_cmmu helper_le_ldw_cmmu
+# define helper_ret_ldl_cmmu helper_le_ldl_cmmu
+# define helper_ret_ldq_cmmu helper_le_ldq_cmmu
#endif
#endif /* CONFIG_SOFTMMU */
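
The new *_cmmu declarations parallel the existing *_mmu data-access helpers but serve code (instruction-fetch) accesses, and the alias block lets a backend name a single helper_ret_* symbol and get the endianness-correct variant. As an illustration only (the table shape is an assumption, not part of this patch), a backend could index the code-load helpers by MO_SIZE:

    /* Illustrative only: selecting a code-load helper by operand size,
       after the aliases above resolve to the le/be variants. */
    static void * const qemu_ld_code_helpers[4] = {
        (void *)helper_ret_ldb_cmmu,   /* MO_8  */
        (void *)helper_ret_ldw_cmmu,   /* MO_16 */
        (void *)helper_ret_ldl_cmmu,   /* MO_32 */
        (void *)helper_ret_ldq_cmmu,   /* MO_64 */
    };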