Diffstat (limited to 'tcg')
-rw-r--r--  tcg/arm/tcg-target.c    |  53
-rw-r--r--  tcg/hppa/tcg-target.c   | 147
-rw-r--r--  tcg/i386/tcg-target.c   | 283
-rw-r--r--  tcg/ia64/tcg-target.c   |  46
-rw-r--r--  tcg/mips/tcg-target.c   |  44
-rw-r--r--  tcg/ppc/tcg-target.c    |  45
-rw-r--r--  tcg/ppc/tcg-target.h    |   2
-rw-r--r--  tcg/ppc64/tcg-target.c  |  45
-rw-r--r--  tcg/s390/tcg-target.c   |  44
-rw-r--r--  tcg/sparc/tcg-target.c  | 124
-rw-r--r--  tcg/tcg.c               | 311
-rw-r--r--  tcg/tcg.h               |  11
-rw-r--r--  tcg/tci/tcg-target.c    |   6
-rw-r--r--  tcg/tci/tcg-target.h    |   2
14 files changed, 1051 insertions(+), 112 deletions(-)
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index 5af21b3f5d..4d59a63855 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -929,6 +929,27 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
#include "../../softmmu_defs.h"
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+ mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -936,6 +957,8 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+ int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
@@ -943,6 +966,7 @@ static void *qemu_st_helpers[4] = {
__stq_mmu,
};
#endif
+#endif
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
@@ -1075,6 +1099,19 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
# endif
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal and incorrect for 64 bit */
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[2], 0,
+ tcg_target_call_iarg_regs[1], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[1], 0,
+ tcg_target_call_iarg_regs[0], SHIFT_IMM_LSL(0));
+
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[0], 0, TCG_AREG0,
+ SHIFT_IMM_LSL(0));
+#endif
tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[s_bits]);
switch (opc) {
@@ -1341,6 +1378,22 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
}
# endif
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal and incorrect for 64 bit */
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[3], 0,
+ tcg_target_call_iarg_regs[2], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[2], 0,
+ tcg_target_call_iarg_regs[1], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[1], 0,
+ tcg_target_call_iarg_regs[0], SHIFT_IMM_LSL(0));
+
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[0], 0, TCG_AREG0,
+ SHIFT_IMM_LSL(0));
+#endif
tcg_out_call(s, (tcg_target_long) qemu_st_helpers[s_bits]);
if (opc == 3)
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
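The same pattern repeats in every softmmu backend touched by this series: under
CONFIG_TCG_PASS_AREG0 the slow path calls the helper_*_mmu functions, which take
the CPU state as an explicit first argument, instead of the legacy __*_mmu
helpers, which reach the CPU state implicitly through the global env kept in the
reserved AREG0 register. A rough sketch of the two signature families for the
32-bit access size; the authoritative prototypes come from softmmu_defs.h, so
treat the exact types below as illustrative:

    /* Legacy: env is implicit, the guest address comes first. */
    uint32_t __ldl_mmu(target_ulong addr, int mmu_idx);
    void __stl_mmu(target_ulong addr, uint32_t val, int mmu_idx);

    /* AREG0 variant: env is argument 0, everything else shifts right by one. */
    uint32_t helper_ldl_mmu(CPUState *env, target_ulong addr, int mmu_idx);
    void helper_stl_mmu(CPUState *env, target_ulong addr, uint32_t val,
                        int mmu_idx);

That one-slot shift is exactly what each backend's "XXX/FIXME: suboptimal" block
implements: rotate the already-loaded argument registers up by one position and
load TCG_AREG0 into argument register 0 before the call.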
diff --git a/tcg/hppa/tcg-target.c b/tcg/hppa/tcg-target.c
index c5a3730a2b..2885212e3a 100644
--- a/tcg/hppa/tcg-target.c
+++ b/tcg/hppa/tcg-target.c
@@ -336,7 +336,7 @@ static int tcg_target_const_match(tcg_target_long val,
#define INSN_COMIBF (INSN_OP(0x23))
/* supplied by libgcc */
-extern void *__canonicalize_funcptr_for_compare(void *);
+extern void *__canonicalize_funcptr_for_compare(const void *);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
@@ -628,7 +628,7 @@ static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
tcg_out_shd(s, ret, arg, temp, 8); /* ret = DCBA */
}
-static void tcg_out_call(TCGContext *s, void *func)
+static void tcg_out_call(TCGContext *s, const void *func)
{
tcg_target_long val, hi, lo, disp;
@@ -882,6 +882,27 @@ static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+ mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -889,12 +910,15 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+ int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
__stl_mmu,
__stq_mmu,
};
+#endif
/* Load and compare a TLB entry, and branch if TLB miss. OFFSET is set to
the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
@@ -1061,6 +1085,15 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
}
tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
tcg_out_call(s, qemu_ld_helpers[opc & 3]);
switch (opc) {
@@ -1212,6 +1245,17 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
tcg_abort();
}
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+ tcg_target_call_iarg_regs[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
tcg_out_call(s, qemu_st_helpers[opc]);
/* label2: */
@@ -1617,23 +1661,18 @@ static int tcg_target_callee_save_regs[] = {
TCG_REG_R18
};
+#define FRAME_SIZE ((-TCG_TARGET_CALL_STACK_OFFSET \
+ + TCG_TARGET_STATIC_CALL_ARGS_SIZE \
+ + ARRAY_SIZE(tcg_target_callee_save_regs) * 4 \
+ + CPU_TEMP_BUF_NLONGS * sizeof(long) \
+ + TCG_TARGET_STACK_ALIGN - 1) \
+ & -TCG_TARGET_STACK_ALIGN)
+
static void tcg_target_qemu_prologue(TCGContext *s)
{
int frame_size, i;
- /* Allocate space for the fixed frame marker. */
- frame_size = -TCG_TARGET_CALL_STACK_OFFSET;
- frame_size += TCG_TARGET_STATIC_CALL_ARGS_SIZE;
-
- /* Allocate space for the saved registers. */
- frame_size += ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
-
- /* Allocate space for the TCG temps. */
- frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
-
- /* Align the allocated space. */
- frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
- & -TCG_TARGET_STACK_ALIGN);
+ frame_size = FRAME_SIZE;
/* The return address is stored in the caller's frame. */
tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);
@@ -1708,3 +1747,81 @@ static void tcg_target_init(TCGContext *s)
tcg_add_target_add_op_defs(hppa_op_defs);
}
+
+typedef struct {
+ uint32_t len __attribute__((aligned((sizeof(void *)))));
+ uint32_t id;
+ uint8_t version;
+ char augmentation[1];
+ uint8_t code_align;
+ uint8_t data_align;
+ uint8_t return_column;
+} DebugFrameCIE;
+
+typedef struct {
+ uint32_t len __attribute__((aligned((sizeof(void *)))));
+ uint32_t cie_offset;
+ tcg_target_long func_start __attribute__((packed));
+ tcg_target_long func_len __attribute__((packed));
+ uint8_t def_cfa[4];
+ uint8_t ret_ofs[3];
+ uint8_t reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
+} DebugFrameFDE;
+
+typedef struct {
+ DebugFrameCIE cie;
+ DebugFrameFDE fde;
+} DebugFrame;
+
+#define ELF_HOST_MACHINE EM_PARISC
+#define ELF_HOST_FLAGS EFA_PARISC_1_1
+
+/* ??? BFD (and thus GDB) wants very much to distinguish between HPUX
+ and other extensions. We don't really care, but if we don't set this
+ to *something* then the object file won't be properly matched. */
+#define ELF_OSABI ELFOSABI_LINUX
+
+static DebugFrame debug_frame = {
+ .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
+ .cie.id = -1,
+ .cie.version = 1,
+ .cie.code_align = 1,
+ .cie.data_align = 1,
+ .cie.return_column = 2,
+
+ .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
+ .fde.def_cfa = {
+ 0x12, 30, /* DW_CFA_def_cfa_sf sp, ... */
+ (-FRAME_SIZE & 0x7f) | 0x80, /* ... sleb128 -FRAME_SIZE */
+ (-FRAME_SIZE >> 7) & 0x7f
+ },
+ .fde.ret_ofs = {
+ 0x11, 2, (-20 / 4) & 0x7f /* DW_CFA_offset_extended_sf r2, 20 */
+ },
+ .fde.reg_ofs = {
+ /* This must match the ordering in tcg_target_callee_save_regs. */
+ 0x80 + 4, 0, /* DW_CFA_offset r4, 0 */
+ 0x80 + 5, 4, /* DW_CFA_offset r5, 4 */
+ 0x80 + 6, 8, /* DW_CFA_offset r6, 8 */
+ 0x80 + 7, 12, /* ... */
+ 0x80 + 8, 16,
+ 0x80 + 9, 20,
+ 0x80 + 10, 24,
+ 0x80 + 11, 28,
+ 0x80 + 12, 32,
+ 0x80 + 13, 36,
+ 0x80 + 14, 40,
+ 0x80 + 15, 44,
+ 0x80 + 16, 48,
+ 0x80 + 17, 52,
+ 0x80 + 18, 56,
+ }
+};
+
+void tcg_register_jit(void *buf, size_t buf_size)
+{
+ debug_frame.fde.func_start = (tcg_target_long) buf;
+ debug_frame.fde.func_len = buf_size;
+
+ tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
+}
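The hppa frame layout computation moves into a FRAME_SIZE macro so the same
constant can feed both tcg_target_qemu_prologue and the static .debug_frame
initializer added at the end of the file, where only a compile-time constant
will do. The rounding step is the usual power-of-two alignment idiom; here is a
standalone sketch with made-up numbers, not the real hppa constants:

    #include <stdio.h>

    /* Round x up to the next multiple of align (align is a power of two). */
    #define ALIGN_UP(x, align)  (((x) + (align) - 1) & -(align))

    int main(void)
    {
        int raw = 16 + 64 + 40;   /* frame marker + callee saves + temps */
        printf("%d -> %d\n", raw, ALIGN_UP(raw, 64));   /* prints 120 -> 128 */
        return 0;
    }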
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index fafd900c5a..871a7e74f5 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -116,17 +116,7 @@ static inline int tcg_target_get_call_iarg_regs_count(int flags)
return 6;
}
- flags &= TCG_CALL_TYPE_MASK;
- switch(flags) {
- case TCG_CALL_TYPE_STD:
- return 0;
- case TCG_CALL_TYPE_REGPARM_1:
- case TCG_CALL_TYPE_REGPARM_2:
- case TCG_CALL_TYPE_REGPARM:
- return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
- default:
- tcg_abort();
- }
+ return 0;
}
/* parse target specific constraints */
@@ -188,6 +178,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
tcg_regset_set32(ct->u.regs, 0, 0xffff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_RSI);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDI);
+#ifdef CONFIG_TCG_PASS_AREG0
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDX);
+#endif
} else {
tcg_regset_set32(ct->u.regs, 0, 0xff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
@@ -967,6 +960,27 @@ static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
#include "../../softmmu_defs.h"
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void *qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void *qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+ mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -974,12 +988,15 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+ int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
__stl_mmu,
__stq_mmu,
};
+#endif
/* Perform the TLB load and compare.
@@ -1148,7 +1165,12 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
int data_reg, data_reg2 = 0;
int addrlo_idx;
#if defined(CONFIG_SOFTMMU)
- int mem_index, s_bits, arg_idx;
+ int mem_index, s_bits;
+#if TCG_TARGET_REG_BITS == 64
+ int arg_idx;
+#else
+ int stack_adjust;
+#endif
uint8_t *label_ptr[3];
#endif
@@ -1184,16 +1206,48 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
}
/* XXX: move that code at the end of the TB */
+#if TCG_TARGET_REG_BITS == 32
+ tcg_out_pushi(s, mem_index);
+ stack_adjust = 4;
+ if (TARGET_LONG_BITS == 64) {
+ tcg_out_push(s, args[addrlo_idx + 1]);
+ stack_adjust += 4;
+ }
+ tcg_out_push(s, args[addrlo_idx]);
+ stack_adjust += 4;
+#ifdef CONFIG_TCG_PASS_AREG0
+ tcg_out_push(s, TCG_AREG0);
+ stack_adjust += 4;
+#endif
+#else
/* The first argument is already loaded with addrlo. */
arg_idx = 1;
- if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
- tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx++],
- args[addrlo_idx + 1]);
- }
tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx],
mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
+ tcg_target_call_iarg_regs[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
+#endif
+
tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);
+#if TCG_TARGET_REG_BITS == 32
+ if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
+ /* Pop and discard. This is 2 bytes smaller than the add. */
+ tcg_out_pop(s, TCG_REG_ECX);
+ } else if (stack_adjust != 0) {
+ tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
+ }
+#endif
+
switch(opc) {
case 0 | 4:
tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
@@ -1359,45 +1413,42 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
}
/* XXX: move that code at the end of the TB */
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
- TCG_REG_RSI, data_reg);
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
- stack_adjust = 0;
- } else if (TARGET_LONG_BITS == 32) {
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, data_reg);
- if (opc == 3) {
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_ECX, data_reg2);
- tcg_out_pushi(s, mem_index);
- stack_adjust = 4;
- } else {
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
- stack_adjust = 0;
- }
- } else {
- if (opc == 3) {
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, args[addrlo_idx + 1]);
- tcg_out_pushi(s, mem_index);
- tcg_out_push(s, data_reg2);
- tcg_out_push(s, data_reg);
- stack_adjust = 12;
- } else {
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, args[addrlo_idx + 1]);
- switch(opc) {
- case 0:
- tcg_out_ext8u(s, TCG_REG_ECX, data_reg);
- break;
- case 1:
- tcg_out_ext16u(s, TCG_REG_ECX, data_reg);
- break;
- case 2:
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_ECX, data_reg);
- break;
- }
- tcg_out_pushi(s, mem_index);
- stack_adjust = 4;
- }
+#if TCG_TARGET_REG_BITS == 32
+ tcg_out_pushi(s, mem_index);
+ stack_adjust = 4;
+ if (opc == 3) {
+ tcg_out_push(s, data_reg2);
+ stack_adjust += 4;
}
+ tcg_out_push(s, data_reg);
+ stack_adjust += 4;
+ if (TARGET_LONG_BITS == 64) {
+ tcg_out_push(s, args[addrlo_idx + 1]);
+ stack_adjust += 4;
+ }
+ tcg_out_push(s, args[addrlo_idx]);
+ stack_adjust += 4;
+#ifdef CONFIG_TCG_PASS_AREG0
+ tcg_out_push(s, TCG_AREG0);
+ stack_adjust += 4;
+#endif
+#else
+ tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+ TCG_REG_RSI, data_reg);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
+ stack_adjust = 0;
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
+ tcg_target_call_iarg_regs[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
+#endif
tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);
@@ -1938,22 +1989,29 @@ static int tcg_target_callee_save_regs[] = {
#endif
};
+/* Compute frame size via macros, to share between tcg_target_qemu_prologue
+ and tcg_register_jit. */
+
+#define PUSH_SIZE \
+ ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
+ * (TCG_TARGET_REG_BITS / 8))
+
+#define FRAME_SIZE \
+ ((PUSH_SIZE \
+ + TCG_STATIC_CALL_ARGS_SIZE \
+ + CPU_TEMP_BUF_NLONGS * sizeof(long) \
+ + TCG_TARGET_STACK_ALIGN - 1) \
+ & ~(TCG_TARGET_STACK_ALIGN - 1))
+
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
- int i, frame_size, push_size, stack_addend;
+ int i, stack_addend;
/* TB prologue */
/* Reserve some stack space, also for TCG temps. */
- push_size = 1 + ARRAY_SIZE(tcg_target_callee_save_regs);
- push_size *= TCG_TARGET_REG_BITS / 8;
-
- frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE +
- CPU_TEMP_BUF_NLONGS * sizeof(long);
- frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
- ~(TCG_TARGET_STACK_ALIGN - 1);
- stack_addend = frame_size - push_size;
+ stack_addend = FRAME_SIZE - PUSH_SIZE;
tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
CPU_TEMP_BUF_NLONGS * sizeof(long));
@@ -1962,9 +2020,15 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_push(s, tcg_target_callee_save_regs[i]);
}
- tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
-
+#if TCG_TARGET_REG_BITS == 32
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
+ (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
+ tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[1], TCG_REG_ESP,
+ (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4);
+#else
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
+#endif
+ tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
/* jmp *tb. */
tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
@@ -2013,3 +2077,92 @@ static void tcg_target_init(TCGContext *s)
tcg_add_target_add_op_defs(x86_op_defs);
}
+
+typedef struct {
+ uint32_t len __attribute__((aligned((sizeof(void *)))));
+ uint32_t id;
+ uint8_t version;
+ char augmentation[1];
+ uint8_t code_align;
+ uint8_t data_align;
+ uint8_t return_column;
+} DebugFrameCIE;
+
+typedef struct {
+ uint32_t len __attribute__((aligned((sizeof(void *)))));
+ uint32_t cie_offset;
+ tcg_target_long func_start __attribute__((packed));
+ tcg_target_long func_len __attribute__((packed));
+ uint8_t def_cfa[4];
+ uint8_t reg_ofs[14];
+} DebugFrameFDE;
+
+typedef struct {
+ DebugFrameCIE cie;
+ DebugFrameFDE fde;
+} DebugFrame;
+
+#if TCG_TARGET_REG_BITS == 64
+#define ELF_HOST_MACHINE EM_X86_64
+static DebugFrame debug_frame = {
+ .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
+ .cie.id = -1,
+ .cie.version = 1,
+ .cie.code_align = 1,
+ .cie.data_align = 0x78, /* sleb128 -8 */
+ .cie.return_column = 16,
+
+ .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
+ .fde.def_cfa = {
+ 12, 7, /* DW_CFA_def_cfa %rsp, ... */
+ (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
+ (FRAME_SIZE >> 7)
+ },
+ .fde.reg_ofs = {
+ 0x90, 1, /* DW_CFA_offset, %rip, -8 */
+ /* The following ordering must match tcg_target_callee_save_regs. */
+ 0x86, 2, /* DW_CFA_offset, %rbp, -16 */
+ 0x83, 3, /* DW_CFA_offset, %rbx, -24 */
+ 0x8c, 4, /* DW_CFA_offset, %r12, -32 */
+ 0x8d, 5, /* DW_CFA_offset, %r13, -40 */
+ 0x8e, 6, /* DW_CFA_offset, %r14, -48 */
+ 0x8f, 7, /* DW_CFA_offset, %r15, -56 */
+ }
+};
+#else
+#define ELF_HOST_MACHINE EM_386
+static DebugFrame debug_frame = {
+ .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
+ .cie.id = -1,
+ .cie.version = 1,
+ .cie.code_align = 1,
+ .cie.data_align = 0x7c, /* sleb128 -4 */
+ .cie.return_column = 8,
+
+ .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
+ .fde.def_cfa = {
+ 12, 4, /* DW_CFA_def_cfa %esp, ... */
+ (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
+ (FRAME_SIZE >> 7)
+ },
+ .fde.reg_ofs = {
+ 0x88, 1, /* DW_CFA_offset, %eip, -4 */
+ /* The following ordering must match tcg_target_callee_save_regs. */
+ 0x85, 2, /* DW_CFA_offset, %ebp, -8 */
+ 0x83, 3, /* DW_CFA_offset, %ebx, -12 */
+ 0x86, 4, /* DW_CFA_offset, %esi, -16 */
+ 0x87, 5, /* DW_CFA_offset, %edi, -20 */
+ }
+};
+#endif
+
+void tcg_register_jit(void *buf, size_t buf_size)
+{
+ /* We're expecting a 2 byte uleb128 encoded value. */
+ assert(FRAME_SIZE >> 14 == 0);
+
+ debug_frame.fde.func_start = (tcg_target_long) buf;
+ debug_frame.fde.func_len = buf_size;
+
+ tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
+}
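Both the 32- and 64-bit i386 DebugFrame initializers hand-encode the CFA offset
as a two-byte uleb128, which is why the new tcg_register_jit asserts that
FRAME_SIZE fits in 14 bits. A standalone sketch of that encoding, using an
arbitrary example value rather than the real frame size:

    #include <assert.h>
    #include <stdio.h>

    /* Emit v as exactly two uleb128 bytes: low 7 bits with the continuation
       bit set, then the remaining bits; the same (v & 0x7f) | 0x80, v >> 7
       pattern used in the .fde.def_cfa initializers above. */
    static void uleb128_2(unsigned v, unsigned char out[2])
    {
        assert(v < (1u << 14));   /* mirrors the FRAME_SIZE >> 14 == 0 check */
        out[0] = (v & 0x7f) | 0x80;
        out[1] = v >> 7;
    }

    int main(void)
    {
        unsigned char b[2];
        uleb128_2(392, b);                       /* 392 = 0x188 */
        printf("0x%02x 0x%02x\n", b[0], b[1]);   /* 0x88 0x03; 8 + 3*128 = 392 */
        return 0;
    }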
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index f90252a443..e02dacc84f 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -1452,12 +1452,25 @@ static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
TCG_REG_P7, TCG_REG_R3, TCG_REG_R57));
}
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+ mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
__ldl_mmu,
__ldq_mmu,
};
+#endif
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
@@ -1517,6 +1530,15 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
}
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
if (!bswap || s_bits == 0) {
tcg_out_bundle(s, miB,
tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
@@ -1547,12 +1569,25 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
}
}
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+ int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
__stl_mmu,
__stq_mmu,
};
+#endif
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
@@ -1622,6 +1657,17 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
data_reg = TCG_REG_R2;
}
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+ tcg_target_call_iarg_regs[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
tcg_out_bundle(s, miB,
tcg_opc_m4 (TCG_REG_P6, opc_st_m4[opc],
data_reg, TCG_REG_R3),
diff --git a/tcg/mips/tcg-target.c b/tcg/mips/tcg-target.c
index c6aa5bced5..393ba07f25 100644
--- a/tcg/mips/tcg-target.c
+++ b/tcg/mips/tcg-target.c
@@ -750,6 +750,27 @@ static void tcg_out_setcond2(TCGContext *s, TCGCond cond, int ret,
#include "../../softmmu_defs.h"
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+ mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -757,6 +778,8 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+ int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
@@ -764,6 +787,7 @@ static void *qemu_st_helpers[4] = {
__stq_mmu,
};
#endif
+#endif
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
int opc)
@@ -858,6 +882,15 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
# endif
tcg_out_movi(s, TCG_TYPE_I32, sp_args++, mem_index);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T9, (tcg_target_long)qemu_ld_helpers[s_bits]);
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal and incorrect for 64 on 32 bit */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
tcg_out_nop(s);
@@ -1069,6 +1102,17 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
}
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T9, (tcg_target_long)qemu_st_helpers[s_bits]);
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal and incorrect for 64 on 32 bit */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+ tcg_target_call_iarg_regs[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
tcg_out_nop(s);
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
index 6a34cab5f9..b0aa914798 100644
--- a/tcg/ppc/tcg-target.c
+++ b/tcg/ppc/tcg-target.c
@@ -508,6 +508,27 @@ static void tcg_out_call (TCGContext *s, tcg_target_long arg, int const_arg)
#include "../../softmmu_defs.h"
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+ mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -515,6 +536,8 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+ int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
@@ -522,6 +545,7 @@ static void *qemu_st_helpers[4] = {
__stq_mmu,
};
#endif
+#endif
static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
{
@@ -598,6 +622,16 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
tcg_out_movi (s, TCG_TYPE_I32, 5, mem_index);
#endif
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
+
tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);
switch (opc) {
case 0|4:
@@ -829,6 +863,17 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
ir++;
tcg_out_movi (s, TCG_TYPE_I32, ir, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+ tcg_target_call_iarg_regs[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
tcg_out_call (s, (tcg_target_long) qemu_st_helpers[opc], 1);
label2_ptr = s->code_ptr;
tcg_out32 (s, B);
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index 3f22aaac9d..2f37fd289b 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -98,5 +98,5 @@ typedef enum {
#define TCG_TARGET_HAS_GUEST_BASE
#define tcg_qemu_tb_exec(env, tb_ptr) \
- ((long REGPARM __attribute__ ((longcall)) \
+ ((long __attribute__ ((longcall)) \
(*)(void *, void *))code_gen_prologue)(env, tb_ptr)
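Dropping REGPARM here (and from the generic definition in tcg.h below) means the
generated prologue is entered with the plain C calling convention on every host;
REGPARM only ever mattered on 32-bit x86 hosts, where it expanded to a regparm
attribute, matching the TCG_CALL_TYPE_REGPARM machinery removed elsewhere in
this commit. The macro itself is nothing more than a cast of the code buffer to
a two-argument function pointer, roughly this shape (illustrative, not the QEMU
definition):

    typedef unsigned long (*tb_exec_fn)(void *env, void *tb_ptr);

    /* Treat a buffer holding the emitted prologue as a callable function.
       Only meaningful if the buffer really contains valid host code. */
    static unsigned long call_code_buffer(void *code_buf, void *env, void *tb_ptr)
    {
        return ((tb_exec_fn)code_buf)(env, tb_ptr);
    }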
diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
index 7f723b5c2c..f2ad9e3d85 100644
--- a/tcg/ppc64/tcg-target.c
+++ b/tcg/ppc64/tcg-target.c
@@ -552,6 +552,27 @@ static void tcg_out_ldsta (TCGContext *s, int ret, int addr,
#include "../../softmmu_defs.h"
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+ mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -559,12 +580,15 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+ int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
__stl_mmu,
__stq_mmu,
};
+#endif
static void tcg_out_tlb_read (TCGContext *s, int r0, int r1, int r2,
int addr_reg, int s_bits, int offset)
@@ -648,6 +672,15 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
tcg_out_mov (s, TCG_TYPE_I64, 3, addr_reg);
tcg_out_movi (s, TCG_TYPE_I64, 4, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);
switch (opc) {
@@ -796,6 +829,17 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
tcg_out_rld (s, RLDICL, 4, data_reg, 0, 64 - (1 << (3 + opc)));
tcg_out_movi (s, TCG_TYPE_I64, 5, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+ tcg_target_call_iarg_regs[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
tcg_out_call (s, (tcg_target_long) qemu_st_helpers[opc], 1);
label2_ptr = s->code_ptr;
@@ -1612,7 +1656,6 @@ static const TCGTargetOpDef ppc_op_defs[] = {
{ INDEX_op_ld16s_i64, { "r", "r" } },
{ INDEX_op_ld32u_i64, { "r", "r" } },
{ INDEX_op_ld32s_i64, { "r", "r" } },
- { INDEX_op_ld_i64, { "r", "r" } },
{ INDEX_op_add_i32, { "r", "r", "ri" } },
{ INDEX_op_mul_i32, { "r", "r", "ri" } },
diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index 47ffcc1f51..04662c15fd 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -301,6 +301,27 @@ static const uint8_t tcg_cond_to_ltr_cond[10] = {
#include "../../softmmu_defs.h"
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+ mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -308,6 +329,8 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+ int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
@@ -315,6 +338,7 @@ static void *qemu_st_helpers[4] = {
__stq_mmu,
};
#endif
+#endif
static uint8_t *tb_ret_addr;
@@ -1483,9 +1507,29 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
tcg_abort();
}
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
} else {
tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+ tcg_target_call_iarg_regs[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);
/* sign extension */
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
index b287122df5..247a27814a 100644
--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -59,6 +59,12 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
};
#endif
+#ifdef CONFIG_TCG_PASS_AREG0
+#define ARG_OFFSET 1
+#else
+#define ARG_OFFSET 0
+#endif
+
static const int tcg_target_reg_alloc_order[] = {
TCG_REG_L0,
TCG_REG_L1,
@@ -86,9 +92,9 @@ static const int tcg_target_call_iarg_regs[6] = {
static const int tcg_target_call_oarg_regs[] = {
TCG_REG_O0,
-#if TCG_TARGET_REG_BITS == 32
- TCG_REG_O1
-#endif
+ TCG_REG_O1,
+ TCG_REG_O2,
+ TCG_REG_O3,
};
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
@@ -155,6 +161,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
+#ifdef CONFIG_TCG_PASS_AREG0
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_O3);
+#endif
break;
case 'I':
ct->ct |= TCG_CT_CONST_S11;
@@ -706,6 +715,27 @@ static void tcg_target_qemu_prologue(TCGContext *s)
#include "../../softmmu_defs.h"
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+ mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -713,6 +743,8 @@ static const void * const qemu_ld_helpers[4] = {
__ldq_mmu,
};
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+ int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
@@ -720,6 +752,7 @@ static const void * const qemu_st_helpers[4] = {
__stq_mmu,
};
#endif
+#endif
#if TARGET_LONG_BITS == 32
#define TARGET_LD_OP LDUW
@@ -801,6 +834,17 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
/* mov */
tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+ tcg_target_call_iarg_regs[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
/* XXX: move that code at the end of the TB */
/* qemu_ld_helper[s_bits](arg0, arg1) */
@@ -1017,6 +1061,17 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
/* mov */
tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal */
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+ tcg_target_call_iarg_regs[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+ tcg_target_call_iarg_regs[1]);
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ tcg_target_call_iarg_regs[0]);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+ TCG_AREG0);
+#endif
/* XXX: move that code at the end of the TB */
/* qemu_st_helper[s_bits](arg0, arg1, arg2) */
tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[s_bits]
@@ -1569,3 +1624,66 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7);
tcg_add_target_add_op_defs(sparc_op_defs);
}
+
+#if TCG_TARGET_REG_BITS == 64
+# define ELF_HOST_MACHINE EM_SPARCV9
+#elif defined(__sparc_v8plus__)
+# define ELF_HOST_MACHINE EM_SPARC32PLUS
+# define ELF_HOST_FLAGS EF_SPARC_32PLUS
+#else
+# define ELF_HOST_MACHINE EM_SPARC
+#endif
+
+typedef struct {
+ uint32_t len __attribute__((aligned((sizeof(void *)))));
+ uint32_t id;
+ uint8_t version;
+ char augmentation[1];
+ uint8_t code_align;
+ uint8_t data_align;
+ uint8_t return_column;
+} DebugFrameCIE;
+
+typedef struct {
+ uint32_t len __attribute__((aligned((sizeof(void *)))));
+ uint32_t cie_offset;
+ tcg_target_long func_start __attribute__((packed));
+ tcg_target_long func_len __attribute__((packed));
+ uint8_t def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
+ uint8_t win_save;
+ uint8_t ret_save[3];
+} DebugFrameFDE;
+
+typedef struct {
+ DebugFrameCIE cie;
+ DebugFrameFDE fde;
+} DebugFrame;
+
+static DebugFrame debug_frame = {
+ .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
+ .cie.id = -1,
+ .cie.version = 1,
+ .cie.code_align = 1,
+ .cie.data_align = -sizeof(void *) & 0x7f,
+ .cie.return_column = 15, /* o7 */
+
+ .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
+ .fde.def_cfa = {
+#if TCG_TARGET_REG_BITS == 64
+ 12, 30, /* DW_CFA_def_cfa i6, 2047 */
+ (2047 & 0x7f) | 0x80, (2047 >> 7)
+#else
+ 13, 30 /* DW_CFA_def_cfa_register i6 */
+#endif
+ },
+ .fde.win_save = 0x2d, /* DW_CFA_GNU_window_save */
+ .fde.ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
+};
+
+void tcg_register_jit(void *buf, size_t buf_size)
+{
+ debug_frame.fde.func_start = (tcg_target_long) buf;
+ debug_frame.fde.func_len = buf_size;
+
+ tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
+}
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 531db55f5d..ab589c7ad2 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -28,6 +28,9 @@
#include "config.h"
+/* Define to dump the ELF file used to communicate with GDB. */
+#undef DEBUG_JIT
+
#if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
/* define it to suppress various consistency checks (faster) */
#define NDEBUG
@@ -45,6 +48,18 @@
#include "cpu.h"
#include "tcg-op.h"
+
+#if TCG_TARGET_REG_BITS == 64
+# define ELF_CLASS ELFCLASS64
+#else
+# define ELF_CLASS ELFCLASS32
+#endif
+#ifdef HOST_WORDS_BIGENDIAN
+# define ELF_DATA ELFDATA2MSB
+#else
+# define ELF_DATA ELFDATA2LSB
+#endif
+
#include "elf.h"
#if defined(CONFIG_USE_GUEST_BASE) && !defined(TCG_TARGET_HAS_GUEST_BASE)
@@ -57,6 +72,10 @@ static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(uint8_t *code_ptr, int type,
tcg_target_long value, tcg_target_long addend);
+static void tcg_register_jit_int(void *buf, size_t size,
+ void *debug_frame, size_t debug_frame_size)
+ __attribute__((unused));
+
/* Forward declarations for functions declared and used in tcg-target.c. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
@@ -173,11 +192,9 @@ void *tcg_malloc_internal(TCGContext *s, int size)
/* big malloc: insert a new pool (XXX: could optimize) */
p = g_malloc(sizeof(TCGPool) + size);
p->size = size;
- if (s->pool_current)
- s->pool_current->next = p;
- else
- s->pool_first = p;
- p->next = s->pool_current;
+ p->next = s->pool_first_large;
+ s->pool_first_large = p;
+ return p->data;
} else {
p = s->pool_current;
if (!p) {
@@ -208,6 +225,12 @@ void *tcg_malloc_internal(TCGContext *s, int size)
void tcg_pool_reset(TCGContext *s)
{
+ TCGPool *p, *t;
+ for (p = s->pool_first_large; p; p = t) {
+ t = p->next;
+ g_free(p);
+ }
+ s->pool_first_large = NULL;
s->pool_cur = s->pool_end = NULL;
s->pool_current = NULL;
}
@@ -590,9 +613,6 @@ void tcg_register_helper(void *func, const char *name)
void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
int sizemask, TCGArg ret, int nargs, TCGArg *args)
{
-#if defined(TCG_TARGET_I386) && TCG_TARGET_REG_BITS < 64
- int call_type;
-#endif
int i;
int real_args;
int nb_rets;
@@ -617,9 +637,6 @@ void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
*gen_opc_ptr++ = INDEX_op_call;
nparam = gen_opparam_ptr++;
-#if defined(TCG_TARGET_I386) && TCG_TARGET_REG_BITS < 64
- call_type = (flags & TCG_CALL_TYPE_MASK);
-#endif
if (ret != TCG_CALL_DUMMY_ARG) {
#if TCG_TARGET_REG_BITS < 64
if (sizemask & 1) {
@@ -645,14 +662,6 @@ void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
#if TCG_TARGET_REG_BITS < 64
int is_64bit = sizemask & (1 << (i+1)*2);
if (is_64bit) {
-#ifdef TCG_TARGET_I386
- /* REGPARM case: if the third parameter is 64 bit, it is
- allocated on the stack */
- if (i == 2 && call_type == TCG_CALL_TYPE_REGPARM) {
- call_type = TCG_CALL_TYPE_REGPARM_2;
- flags = (flags & ~TCG_CALL_TYPE_MASK) | call_type;
- }
-#endif
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
/* some targets want aligned 64 bit args */
if (real_args & 1) {
@@ -2241,3 +2250,267 @@ void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif
+
+#ifdef ELF_HOST_MACHINE
+/* In order to use this feature, the backend needs to do three things:
+
+ (1) Define ELF_HOST_MACHINE to indicate both what value to
+ put into the ELF image and to indicate support for the feature.
+
+ (2) Define tcg_register_jit. This should create a buffer containing
+ the contents of a .debug_frame section that describes the post-
+ prologue unwind info for the tcg machine.
+
+ (3) Call tcg_register_jit_int, with the constructed .debug_frame.
+*/
+
+/* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
+typedef enum {
+ JIT_NOACTION = 0,
+ JIT_REGISTER_FN,
+ JIT_UNREGISTER_FN
+} jit_actions_t;
+
+struct jit_code_entry {
+ struct jit_code_entry *next_entry;
+ struct jit_code_entry *prev_entry;
+ const void *symfile_addr;
+ uint64_t symfile_size;
+};
+
+struct jit_descriptor {
+ uint32_t version;
+ uint32_t action_flag;
+ struct jit_code_entry *relevant_entry;
+ struct jit_code_entry *first_entry;
+};
+
+void __jit_debug_register_code(void) __attribute__((noinline));
+void __jit_debug_register_code(void)
+{
+ asm("");
+}
+
+/* Must statically initialize the version, because GDB may check
+ the version before we can set it. */
+struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
+
+/* End GDB interface. */
+
+static int find_string(const char *strtab, const char *str)
+{
+ const char *p = strtab + 1;
+
+ while (1) {
+ if (strcmp(p, str) == 0) {
+ return p - strtab;
+ }
+ p += strlen(p) + 1;
+ }
+}
+
+static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
+ void *debug_frame, size_t debug_frame_size)
+{
+ struct __attribute__((packed)) DebugInfo {
+ uint32_t len;
+ uint16_t version;
+ uint32_t abbrev;
+ uint8_t ptr_size;
+ uint8_t cu_die;
+ uint16_t cu_lang;
+ uintptr_t cu_low_pc;
+ uintptr_t cu_high_pc;
+ uint8_t fn_die;
+ char fn_name[16];
+ uintptr_t fn_low_pc;
+ uintptr_t fn_high_pc;
+ uint8_t cu_eoc;
+ };
+
+ struct ElfImage {
+ ElfW(Ehdr) ehdr;
+ ElfW(Phdr) phdr;
+ ElfW(Shdr) shdr[7];
+ ElfW(Sym) sym[2];
+ struct DebugInfo di;
+ uint8_t da[24];
+ char str[80];
+ };
+
+ struct ElfImage *img;
+
+ static const struct ElfImage img_template = {
+ .ehdr = {
+ .e_ident[EI_MAG0] = ELFMAG0,
+ .e_ident[EI_MAG1] = ELFMAG1,
+ .e_ident[EI_MAG2] = ELFMAG2,
+ .e_ident[EI_MAG3] = ELFMAG3,
+ .e_ident[EI_CLASS] = ELF_CLASS,
+ .e_ident[EI_DATA] = ELF_DATA,
+ .e_ident[EI_VERSION] = EV_CURRENT,
+ .e_type = ET_EXEC,
+ .e_machine = ELF_HOST_MACHINE,
+ .e_version = EV_CURRENT,
+ .e_phoff = offsetof(struct ElfImage, phdr),
+ .e_shoff = offsetof(struct ElfImage, shdr),
+ .e_ehsize = sizeof(ElfW(Shdr)),
+ .e_phentsize = sizeof(ElfW(Phdr)),
+ .e_phnum = 1,
+ .e_shentsize = sizeof(ElfW(Shdr)),
+ .e_shnum = ARRAY_SIZE(img->shdr),
+ .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
+#ifdef ELF_HOST_FLAGS
+ .e_flags = ELF_HOST_FLAGS,
+#endif
+#ifdef ELF_OSABI
+ .e_ident[EI_OSABI] = ELF_OSABI,
+#endif
+ },
+ .phdr = {
+ .p_type = PT_LOAD,
+ .p_flags = PF_X,
+ },
+ .shdr = {
+ [0] = { .sh_type = SHT_NULL },
+ /* Trick: The contents of code_gen_buffer are not present in
+ this fake ELF file; that got allocated elsewhere. Therefore
+ we mark .text as SHT_NOBITS (similar to .bss) so that readers
+ will not look for contents. We can record any address. */
+ [1] = { /* .text */
+ .sh_type = SHT_NOBITS,
+ .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
+ },
+ [2] = { /* .debug_info */
+ .sh_type = SHT_PROGBITS,
+ .sh_offset = offsetof(struct ElfImage, di),
+ .sh_size = sizeof(struct DebugInfo),
+ },
+ [3] = { /* .debug_abbrev */
+ .sh_type = SHT_PROGBITS,
+ .sh_offset = offsetof(struct ElfImage, da),
+ .sh_size = sizeof(img->da),
+ },
+ [4] = { /* .debug_frame */
+ .sh_type = SHT_PROGBITS,
+ .sh_offset = sizeof(struct ElfImage),
+ },
+ [5] = { /* .symtab */
+ .sh_type = SHT_SYMTAB,
+ .sh_offset = offsetof(struct ElfImage, sym),
+ .sh_size = sizeof(img->sym),
+ .sh_info = 1,
+ .sh_link = ARRAY_SIZE(img->shdr) - 1,
+ .sh_entsize = sizeof(ElfW(Sym)),
+ },
+ [6] = { /* .strtab */
+ .sh_type = SHT_STRTAB,
+ .sh_offset = offsetof(struct ElfImage, str),
+ .sh_size = sizeof(img->str),
+ }
+ },
+ .sym = {
+ [1] = { /* code_gen_buffer */
+ .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
+ .st_shndx = 1,
+ }
+ },
+ .di = {
+ .len = sizeof(struct DebugInfo) - 4,
+ .version = 2,
+ .ptr_size = sizeof(void *),
+ .cu_die = 1,
+ .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
+ .fn_die = 2,
+ .fn_name = "code_gen_buffer"
+ },
+ .da = {
+ 1, /* abbrev number (the cu) */
+ 0x11, 1, /* DW_TAG_compile_unit, has children */
+ 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
+ 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
+ 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
+ 0, 0, /* end of abbrev */
+ 2, /* abbrev number (the fn) */
+ 0x2e, 0, /* DW_TAG_subprogram, no children */
+ 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
+ 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
+ 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
+ 0, 0, /* end of abbrev */
+ 0 /* no more abbrev */
+ },
+ .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
+ ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
+ };
+
+ /* We only need a single jit entry; statically allocate it. */
+ static struct jit_code_entry one_entry;
+
+ uintptr_t buf = (uintptr_t)buf_ptr;
+ size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
+
+ img = g_malloc(img_size);
+ *img = img_template;
+ memcpy(img + 1, debug_frame, debug_frame_size);
+
+ img->phdr.p_vaddr = buf;
+ img->phdr.p_paddr = buf;
+ img->phdr.p_memsz = buf_size;
+
+ img->shdr[1].sh_name = find_string(img->str, ".text");
+ img->shdr[1].sh_addr = buf;
+ img->shdr[1].sh_size = buf_size;
+
+ img->shdr[2].sh_name = find_string(img->str, ".debug_info");
+ img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
+
+ img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
+ img->shdr[4].sh_size = debug_frame_size;
+
+ img->shdr[5].sh_name = find_string(img->str, ".symtab");
+ img->shdr[6].sh_name = find_string(img->str, ".strtab");
+
+ img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
+ img->sym[1].st_value = buf;
+ img->sym[1].st_size = buf_size;
+
+ img->di.cu_low_pc = buf;
+ img->di.cu_high_pc = buf_size;
+ img->di.fn_low_pc = buf;
+ img->di.fn_high_pc = buf_size;
+
+#ifdef DEBUG_JIT
+ /* Enable this block to be able to debug the ELF image file creation.
+ One can use readelf, objdump, or other inspection utilities. */
+ {
+ FILE *f = fopen("/tmp/qemu.jit", "w+b");
+ if (f) {
+ if (fwrite(img, img_size, 1, f) != img_size) {
+ /* Avoid stupid unused return value warning for fwrite. */
+ }
+ fclose(f);
+ }
+ }
+#endif
+
+ one_entry.symfile_addr = img;
+ one_entry.symfile_size = img_size;
+
+ __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
+ __jit_debug_descriptor.relevant_entry = &one_entry;
+ __jit_debug_descriptor.first_entry = &one_entry;
+ __jit_debug_register_code();
+}
+#else
+/* No support for the feature. Provide the entry point expected by exec.c,
+ and implement the internal function we declared earlier. */
+
+static void tcg_register_jit_int(void *buf, size_t size,
+ void *debug_frame, size_t debug_frame_size)
+{
+}
+
+void tcg_register_jit(void *buf, size_t buf_size)
+{
+}
+#endif /* ELF_HOST_MACHINE */
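One more tcg.c change sits earlier in this file's diff, away from the GDB
machinery: oversized tcg_malloc requests are now pushed onto a dedicated
pool_first_large list (the new TCGContext member declared in tcg.h below), and
tcg_pool_reset frees that list instead of leaving the big blocks threaded
through the normal pool chain. A minimal standalone model of the idea, not the
QEMU code:

    #include <stdlib.h>

    typedef struct LargeBlock {
        struct LargeBlock *next;
        /* payload follows the header */
    } LargeBlock;

    static LargeBlock *first_large;

    static void *alloc_large(size_t size)
    {
        LargeBlock *p = malloc(sizeof(*p) + size);
        p->next = first_large;          /* push onto the large-block list */
        first_large = p;
        return p + 1;                   /* hand back the payload area */
    }

    static void pool_reset_large(void)
    {
        LargeBlock *p, *t;
        for (p = first_large; p; p = t) {   /* free every large block */
            t = p->next;
            free(p);
        }
        first_large = NULL;
    }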
diff --git a/tcg/tcg.h b/tcg/tcg.h
index cc223ea540..a83bdddba4 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -252,11 +252,6 @@ typedef int TCGv_i64;
#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1)
/* call flags */
-#define TCG_CALL_TYPE_MASK 0x000f
-#define TCG_CALL_TYPE_STD 0x0000 /* standard C call */
-#define TCG_CALL_TYPE_REGPARM_1 0x0001 /* i386 style regparm call (1 reg) */
-#define TCG_CALL_TYPE_REGPARM_2 0x0002 /* i386 style regparm call (2 regs) */
-#define TCG_CALL_TYPE_REGPARM 0x0003 /* i386 style regparm call (3 regs) */
/* A pure function only reads its arguments and TCG global variables
and cannot raise exceptions. Hence a call to a pure function can be
safely suppressed if the return value is not used. */
@@ -337,7 +332,7 @@ typedef struct TCGContext TCGContext;
struct TCGContext {
uint8_t *pool_cur, *pool_end;
- TCGPool *pool_first, *pool_current;
+ TCGPool *pool_first, *pool_current, *pool_first_large;
TCGLabel *labels;
int nb_labels;
TCGTemp *temps; /* globals first, temps after */
@@ -589,5 +584,7 @@ extern uint8_t code_gen_prologue[];
/* TCG targets may use a different definition of tcg_qemu_tb_exec. */
#if !defined(tcg_qemu_tb_exec)
# define tcg_qemu_tb_exec(env, tb_ptr) \
- ((long REGPARM (*)(void *, void *))code_gen_prologue)(env, tb_ptr)
+ ((tcg_target_ulong (*)(void *, void *))code_gen_prologue)(env, tb_ptr)
#endif
+
+void tcg_register_jit(void *buf, size_t buf_size);
diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c
index bd85073662..453f1875e2 100644
--- a/tcg/tci/tcg-target.c
+++ b/tcg/tci/tcg-target.c
@@ -798,6 +798,9 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_qemu_st8:
case INDEX_op_qemu_st16:
case INDEX_op_qemu_st32:
+#ifdef CONFIG_TCG_PASS_AREG0
+ tcg_out_r(s, TCG_AREG0);
+#endif
tcg_out_r(s, *args++);
tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
@@ -808,6 +811,9 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
#endif
break;
case INDEX_op_qemu_st64:
+#ifdef CONFIG_TCG_PASS_AREG0
+ tcg_out_r(s, TCG_AREG0);
+#endif
tcg_out_r(s, *args++);
#if TCG_TARGET_REG_BITS == 32
tcg_out_r(s, *args++);
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index b61e99aff1..30a0f21596 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -154,7 +154,7 @@ typedef enum {
void tci_disas(uint8_t opc);
-unsigned long tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
+tcg_target_ulong tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#define tcg_qemu_tb_exec tcg_qemu_tb_exec
static inline void flush_icache_range(tcg_target_ulong start,