author     bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>  2003-05-25 16:46:15 +0000
committer  bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>  2003-05-25 16:46:15 +0000
commit     d4e8164f7e9342d692c1d6f1c848ed05f8007ece (patch)
tree       ca8f3b46553b2674eb5ab3297b39db75d78ba4d4
parent     08351fb37ae0abe0d0a025ad67709f1f1fd63d59 (diff)
download   qemu-d4e8164f7e9342d692c1d6f1c848ed05f8007ece.tar.gz

direct chaining for PowerPC and i386

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@183 c046a42c-6fe2-441c-8c8c-71466251a162
-rw-r--r--  dyngen.c          |  96
-rw-r--r--  exec-i386.c       |  38
-rw-r--r--  exec-i386.h       |   2
-rw-r--r--  exec.c            | 111
-rw-r--r--  exec.h            | 264
-rw-r--r--  op-i386.c         | 128
-rw-r--r--  opc-i386.h        |  49
-rw-r--r--  ops_template.h    |  24
-rw-r--r--  translate-i386.c  | 118
9 files changed, 620 insertions(+), 210 deletions(-)
diff --git a/dyngen.c b/dyngen.c
index f037d87590..96a47c8edf 100644
--- a/dyngen.c
+++ b/dyngen.c
@@ -170,7 +170,16 @@ void elf_swap_phdr(struct elf_phdr *h)
swabls(&h->p_align); /* Segment alignment */
}
+/* ELF file info */
int do_swap;
+struct elf_shdr *shdr;
+struct elfhdr ehdr;
+ElfW(Sym) *symtab;
+int nb_syms;
+char *strtab;
+/* data section */
+uint8_t *data_data;
+int data_shndx;
uint16_t get16(uint16_t *p)
{
@@ -270,7 +279,7 @@ int strstart(const char *str, const char *val, const char **ptr)
/* generate op code */
void gen_code(const char *name, host_ulong offset, host_ulong size,
FILE *outfile, uint8_t *text, ELF_RELOC *relocs, int nb_relocs, int reloc_sh_type,
- ElfW(Sym) *symtab, char *strtab, int gen_switch)
+ int gen_switch)
{
int copy_size = 0;
uint8_t *p_start, *p_end;
@@ -291,13 +300,16 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
switch(ELF_ARCH) {
case EM_386:
{
- uint8_t *p;
- p = p_end - 1;
- if (p == p_start)
+ int len;
+ len = p_end - p_start;
+ if (len == 0)
error("empty code for %s", name);
- if (p[0] != 0xc3)
- error("ret expected at the end of %s", name);
- copy_size = p - p_start;
+ if (p_end[-1] == 0xc3) {
+ len--;
+ } else {
+ error("ret or jmp expected at the end of %s", name);
+ }
+ copy_size = len;
}
break;
case EM_PPC:
@@ -423,7 +435,7 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
sym_name = strtab + symtab[ELFW(R_SYM)(rel->r_info)].st_name;
if (strstart(sym_name, "__op_param", &p)) {
n = strtoul(p, NULL, 10);
- if (n >= MAX_ARGS)
+ if (n > MAX_ARGS)
error("too many arguments in %s", name);
args_present[n - 1] = 1;
}
@@ -459,7 +471,9 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
if (rel->r_offset >= start_offset &&
rel->r_offset < start_offset + copy_size) {
sym_name = strtab + symtab[ELFW(R_SYM)(rel->r_info)].st_name;
- if (*sym_name && !strstart(sym_name, "__op_param", &p)) {
+ if (*sym_name &&
+ !strstart(sym_name, "__op_param", NULL) &&
+ !strstart(sym_name, "__op_jmp", NULL)) {
#if defined(HOST_SPARC)
if (sym_name[0] == '.') {
fprintf(outfile,
@@ -474,6 +488,31 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
}
fprintf(outfile, " memcpy(gen_code_ptr, (void *)((char *)&%s+%d), %d);\n", name, start_offset - offset, copy_size);
+
+ /* emit code offset information */
+ {
+ ElfW(Sym) *sym;
+ const char *sym_name, *p;
+ target_ulong val;
+ int n;
+
+ for(i = 0, sym = symtab; i < nb_syms; i++, sym++) {
+ sym_name = strtab + sym->st_name;
+ if (strstart(sym_name, "__op_label", &p)) {
+ /* test if the variable refers to a label inside
+ the code we are generating */
+ if (sym->st_shndx != data_shndx)
+ error("__op_labelN symbols must be in .data or .sdata section");
+ val = *(target_ulong *)(data_data + sym->st_value);
+ if (val >= start_offset && val < start_offset + copy_size) {
+ n = strtol(p, NULL, 10);
+ fprintf(outfile, " label_offsets[%d] = %d + (gen_code_ptr - gen_code_buf);\n", n, val - start_offset);
+ }
+ }
+ }
+ }
+
+ /* load parameters in variables */
for(i = 0; i < nb_args; i++) {
fprintf(outfile, " param%d = *opparam_ptr++;\n", i + 1);
}
@@ -519,6 +558,18 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
if (rel->r_offset >= start_offset &&
rel->r_offset < start_offset + copy_size) {
sym_name = strtab + symtab[ELFW(R_SYM)(rel->r_info)].st_name;
+ if (strstart(sym_name, "__op_jmp", &p)) {
+ int n;
+ n = strtol(p, NULL, 10);
+ /* __op_jmp relocations are done at
+ runtime to do translated block
+ chaining: the offset of the instruction
+ needs to be stored */
+ fprintf(outfile, " jmp_offsets[%d] = %d + (gen_code_ptr - gen_code_buf);\n",
+ n, rel->r_offset - start_offset);
+ continue;
+ }
+
if (strstart(sym_name, "__op_param", &p)) {
snprintf(name, sizeof(name), "param%s", p);
} else {
@@ -824,11 +875,10 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
int load_elf(const char *filename, FILE *outfile, int do_print_enum)
{
int fd;
- struct elfhdr ehdr;
- struct elf_shdr *sec, *shdr, *symtab_sec, *strtab_sec, *text_sec;
- int i, j, nb_syms;
- ElfW(Sym) *symtab, *sym;
- char *shstr, *strtab;
+ struct elf_shdr *sec, *symtab_sec, *strtab_sec, *text_sec;
+ int i, j;
+ ElfW(Sym) *sym;
+ char *shstr, *data_name;
uint8_t *text;
void *relocs;
int nb_relocs, reloc_sh_type;
@@ -880,6 +930,17 @@ int load_elf(const char *filename, FILE *outfile, int do_print_enum)
error("could not find .text section");
text = load_data(fd, text_sec->sh_offset, text_sec->sh_size);
+#if defined(HOST_PPC)
+ data_name = ".sdata";
+#else
+ data_name = ".data";
+#endif
+ sec = find_elf_section(shdr, ehdr.e_shnum, shstr, data_name);
+ if (!sec)
+ error("could not find %s section", data_name);
+ data_shndx = sec - shdr;
+ data_data = load_data(fd, sec->sh_offset, sec->sh_size);
+
/* find text relocations, if any */
nb_relocs = 0;
relocs = NULL;
@@ -936,7 +997,7 @@ int load_elf(const char *filename, FILE *outfile, int do_print_enum)
name = strtab + sym->st_name;
if (strstart(name, OP_PREFIX, &p)) {
gen_code(name, sym->st_value, sym->st_size, outfile,
- text, relocs, nb_relocs, reloc_sh_type, symtab, strtab, 2);
+ text, relocs, nb_relocs, reloc_sh_type, 2);
}
}
} else {
@@ -963,6 +1024,7 @@ fprintf(outfile,
#endif
fprintf(outfile,
"int dyngen_code(uint8_t *gen_code_buf,\n"
+" uint16_t *label_offsets, uint16_t *jmp_offsets,\n"
" const uint16_t *opc_buf, const uint32_t *opparam_buf)\n"
"{\n"
" uint8_t *gen_code_ptr;\n"
@@ -1001,7 +1063,7 @@ fprintf(outfile,
if (sym->st_shndx != (text_sec - shdr))
error("invalid section for opcode (0x%x)", sym->st_shndx);
gen_code(name, sym->st_value, sym->st_size, outfile,
- text, relocs, nb_relocs, reloc_sh_type, symtab, strtab, 1);
+ text, relocs, nb_relocs, reloc_sh_type, 1);
}
}
@@ -1056,7 +1118,7 @@ fprintf(outfile,
if (sym->st_shndx != (text_sec - shdr))
error("invalid section for opcode (0x%x)", sym->st_shndx);
gen_code(name, sym->st_value, sym->st_size, outfile,
- text, relocs, nb_relocs, reloc_sh_type, symtab, strtab, 0);
+ text, relocs, nb_relocs, reloc_sh_type, 0);
}
}
}
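
To make the new dyngen output concrete: for an op such as op_jmp_tb_next built on a PowerPC host, the fprintf calls above would emit a fragment of dyngen_code() roughly like the sketch below. The copy size (32) and the offsets (8, 12) are invented for illustration; only the shape of the statements comes from the format strings in this hunk, and the arrays are the label_offsets/jmp_offsets parameters that translate-i386.c passes as tb->tb_next_offset and tb->tb_jmp_offset.

    memcpy(gen_code_ptr, (void *)((char *)&op_jmp_tb_next+0), 32);
    label_offsets[0] = 12 + (gen_code_ptr - gen_code_buf);  /* address of label0 -> tb_next_offset[0] */
    param1 = *opparam_ptr++;                                 /* PARAM1: TB pointer */
    param2 = *opparam_ptr++;                                 /* PARAM2: target EIP */
    jmp_offsets[0] = 8 + (gen_code_ptr - gen_code_buf);      /* offset of the 'b' insn -> tb_jmp_offset[0] */
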
diff --git a/exec-i386.c b/exec-i386.c
index 7575123ec5..978b1da9d8 100644
--- a/exec-i386.c
+++ b/exec-i386.c
@@ -120,7 +120,7 @@ int cpu_x86_exec(CPUX86State *env1)
TranslationBlock *tb, **ptb;
uint8_t *tc_ptr, *cs_base, *pc;
unsigned int flags;
-
+
/* first we save global registers */
saved_T0 = T0;
saved_T1 = T1;
@@ -169,6 +169,7 @@ int cpu_x86_exec(CPUX86State *env1)
/* prepare setjmp context for exception handling */
if (setjmp(env->jmp_env) == 0) {
+ T0 = 0; /* force lookup of first TB */
for(;;) {
if (env->interrupt_request) {
raise_exception(EXCP_INTERRUPT);
@@ -209,30 +210,40 @@ int cpu_x86_exec(CPUX86State *env1)
flags |= (env->eflags & TF_MASK) << (GEN_FLAG_TF_SHIFT - 8);
cs_base = env->seg_cache[R_CS].base;
pc = cs_base + env->eip;
+ spin_lock(&tb_lock);
tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
flags);
if (!tb) {
/* if no translated code available, then translate it now */
- /* very inefficient but safe: we lock all the cpus
- when generating code */
- spin_lock(&tb_lock);
+ tb = tb_alloc((unsigned long)pc);
+ if (!tb) {
+ /* flush must be done */
+ tb_flush();
+ /* cannot fail at this point */
+ tb = tb_alloc((unsigned long)pc);
+ /* don't forget to invalidate previous TB info */
+ ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
+ T0 = 0;
+ }
tc_ptr = code_gen_ptr;
+ tb->tc_ptr = tc_ptr;
ret = cpu_x86_gen_code(code_gen_ptr, CODE_GEN_MAX_SIZE,
&code_gen_size, pc, cs_base, flags,
- &code_size);
+ &code_size, tb);
/* if invalid instruction, signal it */
if (ret != 0) {
+ /* NOTE: the tb is allocated but not linked, so we
+ can leave it */
spin_unlock(&tb_lock);
raise_exception(EXCP06_ILLOP);
}
- tb = tb_alloc((unsigned long)pc, code_size);
*ptb = tb;
+ tb->size = code_size;
tb->cs_base = (unsigned long)cs_base;
tb->flags = flags;
- tb->tc_ptr = tc_ptr;
tb->hash_next = NULL;
+ tb_link(tb);
code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
- spin_unlock(&tb_lock);
}
#ifdef DEBUG_EXEC
if (loglevel) {
@@ -241,14 +252,21 @@ int cpu_x86_exec(CPUX86State *env1)
lookup_symbol((void *)tb->pc));
}
#endif
- /* execute the generated code */
+
+ /* see if we can patch the calling TB */
+ if (T0 != 0 && !(env->eflags & TF_MASK)) {
+ tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
+ }
tc_ptr = tb->tc_ptr;
+ spin_unlock(&tb_lock);
+
+ /* execute the generated code */
gen_func = (void *)tc_ptr;
#ifdef __sparc__
__asm__ __volatile__("call %0\n\t"
" mov %%o7,%%i0"
: /* no outputs */
- : "r" (gen_func)
+ : "r" (gen_func)
: "i0", "i1", "i2", "i3", "i4", "i5");
#else
gen_func();
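
As a reading aid (not part of the patch): the value a translated block leaves in T0 is a tagged pointer, i.e. the address of the block that just ran with the exit slot (0 or 1) in the low bits. A minimal sketch of the decode performed by the "see if we can patch the calling TB" hunk above, assuming the TranslationBlock type and tb_add_jump() declared in the new exec.h further down:

    /* Sketch only: chain the previously executed block to the one about to run. */
    static void chain_previous_tb(unsigned long t0, TranslationBlock *next_tb)
    {
        TranslationBlock *prev = (TranslationBlock *)(t0 & ~3); /* pointer part */
        int slot = t0 & 3;                                      /* which of the two exits was taken */
        if (prev)
            tb_add_jump(prev, slot, next_tb);                   /* patch prev's exit to jump straight to next_tb */
    }
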
diff --git a/exec-i386.h b/exec-i386.h
index 322b7f3d1c..938680d1e5 100644
--- a/exec-i386.h
+++ b/exec-i386.h
@@ -205,8 +205,10 @@ extern int __op_param1, __op_param2, __op_param3;
#define PARAM2 ((long)(&__op_param2))
#define PARAM3 ((long)(&__op_param3))
#endif
+extern int __op_jmp0, __op_jmp1;
#include "cpu-i386.h"
+#include "exec.h"
typedef struct CCTable {
int (*compute_all)(void); /* return all the flags */
diff --git a/exec.c b/exec.c
index 936424ad8f..8f332c9aed 100644
--- a/exec.c
+++ b/exec.c
@@ -27,6 +27,7 @@
#include <sys/mman.h>
#include "cpu-i386.h"
+#include "exec.h"
//#define DEBUG_TB_INVALIDATE
#define DEBUG_FLUSH
@@ -212,6 +213,7 @@ static void page_flush_tb(void)
}
/* flush all the translation blocks */
+/* XXX: tb_flush is currently not thread safe */
void tb_flush(void)
{
int i;
@@ -226,7 +228,8 @@ void tb_flush(void)
tb_hash[i] = NULL;
page_flush_tb();
code_gen_ptr = code_gen_buffer;
- /* XXX: flush processor icache at this point */
+ /* XXX: flush processor icache at this point if cache flush is
+ expensive */
}
#ifdef DEBUG_TB_CHECK
@@ -265,6 +268,26 @@ static void tb_page_check(void)
}
}
+void tb_jmp_check(TranslationBlock *tb)
+{
+ TranslationBlock *tb1;
+ unsigned int n1;
+
+ /* walk the circular list of jumps to this TB */
+ tb1 = tb->jmp_first;
+ for(;;) {
+ n1 = (long)tb1 & 3;
+ tb1 = (TranslationBlock *)((long)tb1 & ~3);
+ if (n1 == 2)
+ break;
+ tb1 = tb1->jmp_next[n1];
+ }
+ /* check end of list */
+ if (tb1 != tb) {
+ printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
+ }
+}
+
#endif
/* invalidate one TB */
@@ -282,12 +305,48 @@ static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
}
}
+static inline void tb_jmp_remove(TranslationBlock *tb, int n)
+{
+ TranslationBlock *tb1, **ptb;
+ unsigned int n1;
+
+ ptb = &tb->jmp_next[n];
+ tb1 = *ptb;
+ if (tb1) {
+ /* find tb(n) in circular list */
+ for(;;) {
+ tb1 = *ptb;
+ n1 = (long)tb1 & 3;
+ tb1 = (TranslationBlock *)((long)tb1 & ~3);
+ if (n1 == n && tb1 == tb)
+ break;
+ if (n1 == 2) {
+ ptb = &tb1->jmp_first;
+ } else {
+ ptb = &tb1->jmp_next[n1];
+ }
+ }
+ /* now we can suppress tb(n) from the list */
+ *ptb = tb->jmp_next[n];
+
+ tb->jmp_next[n] = NULL;
+ }
+}
+
+/* reset the jump entry 'n' of a TB so that it is not chained to
+ another TB */
+static inline void tb_reset_jump(TranslationBlock *tb, int n)
+{
+ tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
+}
+
static inline void tb_invalidate(TranslationBlock *tb, int parity)
{
PageDesc *p;
unsigned int page_index1, page_index2;
- unsigned int h;
-
+ unsigned int h, n1;
+ TranslationBlock *tb1, *tb2;
+
/* remove the TB from the hash list */
h = tb_hash_func(tb->pc);
tb_remove(&tb_hash[h], tb,
@@ -305,6 +364,24 @@ static inline void tb_invalidate(TranslationBlock *tb, int parity)
tb_remove(&p->first_tb, tb,
offsetof(TranslationBlock, page_next[page_index2 & 1]));
}
+
+ /* suppress this TB from the two jump lists */
+ tb_jmp_remove(tb, 0);
+ tb_jmp_remove(tb, 1);
+
+ /* suppress any remaining jumps to this TB */
+ tb1 = tb->jmp_first;
+ for(;;) {
+ n1 = (long)tb1 & 3;
+ if (n1 == 2)
+ break;
+ tb1 = (TranslationBlock *)((long)tb1 & ~3);
+ tb2 = tb1->jmp_next[n1];
+ tb_reset_jump(tb1, n1);
+ tb1->jmp_next[n1] = NULL;
+ tb1 = tb2;
+ }
+ tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
/* invalidate all TBs which intersect with the target page starting at addr */
@@ -367,27 +444,39 @@ static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
/* Allocate a new translation block. Flush the translation buffer if
too many translation blocks or too much generated code. */
-TranslationBlock *tb_alloc(unsigned long pc,
- unsigned long size)
+TranslationBlock *tb_alloc(unsigned long pc)
{
TranslationBlock *tb;
- unsigned int page_index1, page_index2;
if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
(code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
- tb_flush();
+ return NULL;
tb = &tbs[nb_tbs++];
tb->pc = pc;
- tb->size = size;
+ return tb;
+}
+
+/* link the tb with the other TBs */
+void tb_link(TranslationBlock *tb)
+{
+ unsigned int page_index1, page_index2;
/* add in the page list */
- page_index1 = pc >> TARGET_PAGE_BITS;
+ page_index1 = tb->pc >> TARGET_PAGE_BITS;
tb_alloc_page(tb, page_index1);
- page_index2 = (pc + size - 1) >> TARGET_PAGE_BITS;
+ page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
if (page_index2 != page_index1) {
tb_alloc_page(tb, page_index2);
}
- return tb;
+ tb->jmp_first = (TranslationBlock *)((long)tb | 2);
+ tb->jmp_next[0] = NULL;
+ tb->jmp_next[1] = NULL;
+
+ /* init original jump addresses */
+ if (tb->tb_next_offset[0] != 0xffff)
+ tb_reset_jump(tb, 0);
+ if (tb->tb_next_offset[1] != 0xffff)
+ tb_reset_jump(tb, 1);
}
/* called from signal handler: invalidate the code and unprotect the
diff --git a/exec.h b/exec.h
new file mode 100644
index 0000000000..c6a6d1b23e
--- /dev/null
+++ b/exec.h
@@ -0,0 +1,264 @@
+/*
+ * internal execution defines for qemu
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define GEN_FLAG_CODE32_SHIFT 0
+#define GEN_FLAG_ADDSEG_SHIFT 1
+#define GEN_FLAG_SS32_SHIFT 2
+#define GEN_FLAG_VM_SHIFT 3
+#define GEN_FLAG_ST_SHIFT 4
+#define GEN_FLAG_CPL_SHIFT 7
+#define GEN_FLAG_IOPL_SHIFT 9
+#define GEN_FLAG_TF_SHIFT 11
+
+struct TranslationBlock;
+int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
+ int *gen_code_size_ptr,
+ uint8_t *pc_start, uint8_t *cs_base, int flags,
+ int *code_size_ptr, struct TranslationBlock *tb);
+void cpu_x86_tblocks_init(void);
+void page_init(void);
+int page_unprotect(unsigned long address);
+
+#define CODE_GEN_MAX_SIZE 65536
+#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
+
+#define CODE_GEN_HASH_BITS 15
+#define CODE_GEN_HASH_SIZE (1 << CODE_GEN_HASH_BITS)
+
+/* maximum total translated code allocated */
+#define CODE_GEN_BUFFER_SIZE (2048 * 1024)
+//#define CODE_GEN_BUFFER_SIZE (128 * 1024)
+
+#if defined(__powerpc__)
+#define USE_DIRECT_JUMP
+#endif
+
+typedef struct TranslationBlock {
+ unsigned long pc; /* simulated PC corresponding to this block (EIP + CS base) */
+ unsigned long cs_base; /* CS base for this block */
+ unsigned int flags; /* flags defining in which context the code was generated */
+ uint16_t size; /* size of target code for this block (1 <=
+ size <= TARGET_PAGE_SIZE) */
+ uint8_t *tc_ptr; /* pointer to the translated code */
+ struct TranslationBlock *hash_next; /* next matching block */
+ struct TranslationBlock *page_next[2]; /* next blocks in even/odd page */
+ /* the following data are used to directly call another TB from
+ the code of this one. */
+ uint16_t tb_next_offset[2]; /* offset of original jump target */
+#ifdef USE_DIRECT_JUMP
+ uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
+#else
+ uint8_t *tb_next[2]; /* address of jump generated code */
+#endif
+ /* list of TBs jumping to this one. This is a circular list using
+ the two least significant bits of the pointers to tell what is
+ the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
+ jmp_first */
+ struct TranslationBlock *jmp_next[2];
+ struct TranslationBlock *jmp_first;
+} TranslationBlock;
+
+static inline unsigned int tb_hash_func(unsigned long pc)
+{
+ return pc & (CODE_GEN_HASH_SIZE - 1);
+}
+
+TranslationBlock *tb_alloc(unsigned long pc);
+void tb_flush(void);
+void tb_link(TranslationBlock *tb);
+
+extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
+
+extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
+extern uint8_t *code_gen_ptr;
+
+/* find a translation block in the translation cache. If not found,
+ return NULL and the pointer to the last element of the list in pptb */
+static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
+ unsigned long pc,
+ unsigned long cs_base,
+ unsigned int flags)
+{
+ TranslationBlock **ptb, *tb;
+ unsigned int h;
+
+ h = tb_hash_func(pc);
+ ptb = &tb_hash[h];
+ for(;;) {
+ tb = *ptb;
+ if (!tb)
+ break;
+ if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
+ return tb;
+ ptb = &tb->hash_next;
+ }
+ *pptb = ptb;
+ return NULL;
+}
+
+#if defined(__powerpc__)
+
+static inline void tb_set_jmp_target(TranslationBlock *tb,
+ int n, unsigned long addr)
+{
+ uint32_t val, *ptr;
+ unsigned long offset;
+
+ offset = (unsigned long)(tb->tc_ptr + tb->tb_jmp_offset[n]);
+
+ /* patch the branch destination */
+ ptr = (uint32_t *)offset;
+ val = *ptr;
+ val = (val & ~0x03fffffc) | ((addr - offset) & 0x03fffffc);
+ *ptr = val;
+ /* flush icache */
+ asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
+ asm volatile ("sync" : : : "memory");
+ asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
+ asm volatile ("sync" : : : "memory");
+ asm volatile ("isync" : : : "memory");
+}
+
+#else
+
+/* set the jump target */
+static inline void tb_set_jmp_target(TranslationBlock *tb,
+ int n, unsigned long addr)
+{
+ tb->tb_next[n] = (void *)addr;
+}
+
+#endif
+
+static inline void tb_add_jump(TranslationBlock *tb, int n,
+ TranslationBlock *tb_next)
+{
+ /* patch the native jump address */
+ tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);
+
+ /* add in TB jmp circular list */
+ tb->jmp_next[n] = tb_next->jmp_first;
+ tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
+}
+
+#ifndef offsetof
+#define offsetof(type, field) ((size_t) &((type *)0)->field)
+#endif
+
+#ifdef __powerpc__
+static inline int testandset (int *p)
+{
+ int ret;
+ __asm__ __volatile__ (
+ "0: lwarx %0,0,%1 ;"
+ " xor. %0,%3,%0;"
+ " bne 1f;"
+ " stwcx. %2,0,%1;"
+ " bne- 0b;"
+ "1: "
+ : "=&r" (ret)
+ : "r" (p), "r" (1), "r" (0)
+ : "cr0", "memory");
+ return ret;
+}
+#endif
+
+#ifdef __i386__
+static inline int testandset (int *p)
+{
+ char ret;
+ long int readval;
+
+ __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
+ : "=q" (ret), "=m" (*p), "=a" (readval)
+ : "r" (1), "m" (*p), "a" (0)
+ : "memory");
+ return ret;
+}
+#endif
+
+#ifdef __s390__
+static inline int testandset (int *p)
+{
+ int ret;
+
+ __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
+ " jl 0b"
+ : "=&d" (ret)
+ : "r" (1), "a" (p), "0" (*p)
+ : "cc", "memory" );
+ return ret;
+}
+#endif
+
+#ifdef __alpha__
+int testandset (int *p)
+{
+ int ret;
+ unsigned long one;
+
+ __asm__ __volatile__ ("0: mov 1,%2\n"
+ " ldl_l %0,%1\n"
+ " stl_c %2,%1\n"
+ " beq %2,1f\n"
+ ".subsection 2\n"
+ "1: br 0b\n"
+ ".previous"
+ : "=r" (ret), "=m" (*p), "=r" (one)
+ : "m" (*p));
+ return ret;
+}
+#endif
+
+#ifdef __sparc__
+static inline int testandset (int *p)
+{
+ int ret;
+
+ __asm__ __volatile__("ldstub [%1], %0"
+ : "=r" (ret)
+ : "r" (p)
+ : "memory");
+
+ return (ret ? 1 : 0);
+}
+#endif
+
+typedef int spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED 0
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ while (testandset(lock));
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ *lock = 0;
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ return !testandset(lock);
+}
+
+extern spinlock_t tb_lock;
+
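
The jmp_first/jmp_next fields above form a circular, tagged-pointer list of all blocks that jump into a given TB: the two low bits of each pointer select which jmp_next[] slot to follow next, and the value 2 marks the jmp_first terminator. A small sketch (not in the patch) that walks the list the same way tb_jmp_remove() and tb_invalidate() do, here just to count incoming jumps:

    /* Sketch only: count how many (tb, slot) exits are currently chained into 'tb'. */
    static int count_incoming_jumps(TranslationBlock *tb)
    {
        TranslationBlock *p = tb->jmp_first;
        int count = 0;
        for (;;) {
            unsigned int n = (long)p & 3;           /* tag: 0/1 = jmp_next[n], 2 = terminator */
            if (n == 2)
                break;                              /* back at tb->jmp_first: end of the list */
            p = (TranslationBlock *)((long)p & ~3); /* strip the tag to get the real pointer */
            count++;
            p = p->jmp_next[n];                     /* follow the slot named by the tag */
        }
        return count;
    }
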
diff --git a/op-i386.c b/op-i386.c
index 64cbe708a5..cfae103639 100644
--- a/op-i386.c
+++ b/op-i386.c
@@ -709,7 +709,44 @@ void OPPROTO op_cmpxchg8b(void)
FORCE_RET();
}
-/* string ops */
+#if defined(__powerpc__)
+
+/* on PowerPC we patch the jump instruction directly */
+#define JUMP_TB(tbparam, n, eip)\
+do {\
+ static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
+ asm volatile ("b %0" : : "i" (&__op_jmp ## n));\
+label ## n:\
+ T0 = (long)(tbparam) + (n);\
+ EIP = eip;\
+} while (0)
+
+#else
+
+/* jump to next block operations (more portable code, does not need
+ cache flushing, but slower because of indirect jump) */
+#define JUMP_TB(tbparam, n, eip)\
+do {\
+ static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
+ goto *((TranslationBlock *)tbparam)->tb_next[n];\
+label ## n:\
+ T0 = (long)(tbparam) + (n);\
+ EIP = eip;\
+} while (0)
+
+#endif
+
+void OPPROTO op_jmp_tb_next(void)
+{
+ JUMP_TB(PARAM1, 0, PARAM2);
+}
+
+void OPPROTO op_movl_T0_0(void)
+{
+ T0 = 0;
+}
+
+/* multiple size ops */
#define ldul ldl
@@ -1199,90 +1236,15 @@ void OPPROTO op_lar(void)
/* flags handling */
-/* slow jumps cases (compute x86 flags) */
-void OPPROTO op_jo_cc(void)
-{
- int eflags;
- eflags = cc_table[CC_OP].compute_all();
- if (eflags & CC_O)
- EIP = PARAM1;
- else
- EIP = PARAM2;
- FORCE_RET();
-}
-
-void OPPROTO op_jb_cc(void)
-{
- if (cc_table[CC_OP].compute_c())
- EIP = PARAM1;
- else
- EIP = PARAM2;
- FORCE_RET();
-}
-
-void OPPROTO op_jz_cc(void)
-{
- int eflags;
- eflags = cc_table[CC_OP].compute_all();
- if (eflags & CC_Z)
- EIP = PARAM1;
- else
- EIP = PARAM2;
- FORCE_RET();
-}
-
-void OPPROTO op_jbe_cc(void)
+/* slow jump cases: in order to avoid calling a function with a
+ pointer (which can generate a stack frame on PowerPC), we use
+ op_setcc to set T0 and then call op_jcc. */
+void OPPROTO op_jcc(void)
{
- int eflags;
- eflags = cc_table[CC_OP].compute_all();
- if (eflags & (CC_Z | CC_C))
- EIP = PARAM1;
- else
- EIP = PARAM2;
- FORCE_RET();
-}
-
-void OPPROTO op_js_cc(void)
-{
- int eflags;
- eflags = cc_table[CC_OP].compute_all();
- if (eflags & CC_S)
- EIP = PARAM1;
- else
- EIP = PARAM2;
- FORCE_RET();
-}
-
-void OPPROTO op_jp_cc(void)
-{
- int eflags;
- eflags = cc_table[CC_OP].compute_all();
- if (eflags & CC_P)
- EIP = PARAM1;
- else
- EIP = PARAM2;
- FORCE_RET();
-}
-
-void OPPROTO op_jl_cc(void)
-{
- int eflags;
- eflags = cc_table[CC_OP].compute_all();
- if ((eflags ^ (eflags >> 4)) & 0x80)
- EIP = PARAM1;
- else
- EIP = PARAM2;
- FORCE_RET();
-}
-
-void OPPROTO op_jle_cc(void)
-{
- int eflags;
- eflags = cc_table[CC_OP].compute_all();
- if (((eflags ^ (eflags >> 4)) & 0x80) || (eflags & CC_Z))
- EIP = PARAM1;
+ if (T0)
+ JUMP_TB(PARAM1, 0, PARAM2);
else
- EIP = PARAM2;
+ JUMP_TB(PARAM1, 1, PARAM3);
FORCE_RET();
}
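
The portable JUMP_TB variant above relies on GCC's labels-as-values extension: each exit of a block is a computed goto through tb->tb_next[n], which tb_link()/tb_reset_jump() initially point just past the goto (the address recorded via __op_labelN), so an unchained block falls through, sets T0/EIP and returns to the main loop; tb_add_jump() later overwrites the pointer with the next block's tc_ptr. A standalone toy example of that mechanism (requires GCC; nothing here comes from the patch):

    #include <stdio.h>

    int main(void)
    {
        /* 'exit_target' plays the role of tb->tb_next[0]: chaining a block
           just overwrites this data pointer; no instruction patching or
           icache flush is needed. */
        static void *exit_target = &&not_chained;

        goto *exit_target;                /* indirect jump through the patchable pointer */
    not_chained:
        printf("fell through: block not chained yet\n");
        return 0;
    }
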
diff --git a/opc-i386.h b/opc-i386.h
index 6cfb42e88d..e658d4f79a 100644
--- a/opc-i386.h
+++ b/opc-i386.h
@@ -231,18 +231,20 @@ DEF(jmp_T0, 0)
DEF(jmp_im, 1)
DEF(int_im, 2)
DEF(raise_exception, 1)
-DEF(into, 0)
+DEF(into, 1)
DEF(cli, 0)
DEF(sti, 0)
DEF(boundw, 0)
DEF(boundl, 0)
DEF(cmpxchg8b, 0)
-DEF(jb_subb, 2)
-DEF(jz_subb, 2)
-DEF(jbe_subb, 2)
-DEF(js_subb, 2)
-DEF(jl_subb, 2)
-DEF(jle_subb, 2)
+DEF(jmp_tb_next, 2)
+DEF(movl_T0_0, 0)
+DEF(jb_subb, 3)
+DEF(jz_subb, 3)
+DEF(jbe_subb, 3)
+DEF(js_subb, 3)
+DEF(jl_subb, 3)
+DEF(jle_subb, 3)
DEF(setb_T0_subb, 0)
DEF(setz_T0_subb, 0)
DEF(setbe_T0_subb, 0)
@@ -314,12 +316,12 @@ DEF(insb_a16, 0)
DEF(rep_insb_a16, 0)
DEF(outb_T0_T1, 0)
DEF(inb_T0_T1, 0)
-DEF(jb_subw, 2)
-DEF(jz_subw, 2)
-DEF(jbe_subw, 2)
-DEF(js_subw, 2)
-DEF(jl_subw, 2)
-DEF(jle_subw, 2)
+DEF(jb_subw, 3)
+DEF(jz_subw, 3)
+DEF(jbe_subw, 3)
+DEF(js_subw, 3)
+DEF(jl_subw, 3)
+DEF(jle_subw, 3)
DEF(loopnzw, 2)
DEF(loopzw, 2)
DEF(loopw, 2)
@@ -405,12 +407,12 @@ DEF(insw_a16, 0)
DEF(rep_insw_a16, 0)
DEF(outw_T0_T1, 0)
DEF(inw_T0_T1, 0)
-DEF(jb_subl, 2)
-DEF(jz_subl, 2)
-DEF(jbe_subl, 2)
-DEF(js_subl, 2)
-DEF(jl_subl, 2)
-DEF(jle_subl, 2)
+DEF(jb_subl, 3)
+DEF(jz_subl, 3)
+DEF(jbe_subl, 3)
+DEF(js_subl, 3)
+DEF(jl_subl, 3)
+DEF(jle_subl, 3)
DEF(loopnzl, 2)
DEF(loopzl, 2)
DEF(loopl, 2)
@@ -536,14 +538,7 @@ DEF(movl_A0_seg, 1)
DEF(addl_A0_seg, 1)
DEF(lsl, 0)
DEF(lar, 0)
-DEF(jo_cc, 2)
-DEF(jb_cc, 2)
-DEF(jz_cc, 2)
-DEF(jbe_cc, 2)
-DEF(js_cc, 2)
-DEF(jp_cc, 2)
-DEF(jl_cc, 2)
-DEF(jle_cc, 2)
+DEF(jcc, 3)
DEF(seto_T0_cc, 0)
DEF(setb_T0_cc, 0)
DEF(setz_T0_cc, 0)
diff --git a/ops_template.h b/ops_template.h
index 7adf7be6a7..ff28086f32 100644
--- a/ops_template.h
+++ b/ops_template.h
@@ -238,18 +238,18 @@ void OPPROTO glue(op_jb_sub, SUFFIX)(void)
src2 = CC_SRC - CC_DST;
if ((DATA_TYPE)src1 < (DATA_TYPE)src2)
- EIP = PARAM1;
+ JUMP_TB(PARAM1, 0, PARAM2);
else
- EIP = PARAM2;
+ JUMP_TB(PARAM1, 1, PARAM3);
FORCE_RET();
}
void OPPROTO glue(op_jz_sub, SUFFIX)(void)
{
if ((DATA_TYPE)CC_DST == 0)
- EIP = PARAM1;
+ JUMP_TB(PARAM1, 0, PARAM2);
else
- EIP = PARAM2;
+ JUMP_TB(PARAM1, 1, PARAM3);
FORCE_RET();
}
@@ -260,18 +260,18 @@ void OPPROTO glue(op_jbe_sub, SUFFIX)(void)
src2 = CC_SRC - CC_DST;
if ((DATA_TYPE)src1 <= (DATA_TYPE)src2)
- EIP = PARAM1;
+ JUMP_TB(PARAM1, 0, PARAM2);
else
- EIP = PARAM2;
+ JUMP_TB(PARAM1, 1, PARAM3);
FORCE_RET();
}
void OPPROTO glue(op_js_sub, SUFFIX)(void)
{
if (CC_DST & SIGN_MASK)
- EIP = PARAM1;
+ JUMP_TB(PARAM1, 0, PARAM2);
else
- EIP = PARAM2;
+ JUMP_TB(PARAM1, 1, PARAM3);
FORCE_RET();
}
@@ -282,9 +282,9 @@ void OPPROTO glue(op_jl_sub, SUFFIX)(void)
src2 = CC_SRC - CC_DST;
if ((DATA_STYPE)src1 < (DATA_STYPE)src2)
- EIP = PARAM1;
+ JUMP_TB(PARAM1, 0, PARAM2);
else
- EIP = PARAM2;
+ JUMP_TB(PARAM1, 1, PARAM3);
FORCE_RET();
}
@@ -295,9 +295,9 @@ void OPPROTO glue(op_jle_sub, SUFFIX)(void)
src2 = CC_SRC - CC_DST;
if ((DATA_STYPE)src1 <= (DATA_STYPE)src2)
- EIP = PARAM1;
+ JUMP_TB(PARAM1, 0, PARAM2);
else
- EIP = PARAM2;
+ JUMP_TB(PARAM1, 1, PARAM3);
FORCE_RET();
}
diff --git a/translate-i386.c b/translate-i386.c
index 32e188bbb7..9ef7a3b058 100644
--- a/translate-i386.c
+++ b/translate-i386.c
@@ -31,11 +31,15 @@
#define IN_OP_I386
#include "cpu-i386.h"
+#include "exec.h"
/* XXX: move that elsewhere */
static uint16_t *gen_opc_ptr;
static uint32_t *gen_opparam_ptr;
int __op_param1, __op_param2, __op_param3;
+#ifdef USE_DIRECT_JUMP
+int __op_jmp0, __op_jmp1;
+#endif
#ifdef __i386__
static inline void flush_icache_range(unsigned long start, unsigned long stop)
@@ -67,14 +71,14 @@ static void inline flush_icache_range(unsigned long start, unsigned long stop)
stop = (stop + MIN_CACHE_LINE_SIZE - 1) & ~(MIN_CACHE_LINE_SIZE - 1);
for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
- asm ("dcbst 0,%0;" : : "r"(p) : "memory");
+ asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
}
- asm ("sync");
+ asm volatile ("sync" : : : "memory");
for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
- asm ("icbi 0,%0; sync;" : : "r"(p) : "memory");
+ asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
}
- asm ("sync");
- asm ("isync");
+ asm volatile ("sync" : : : "memory");
+ asm volatile ("isync" : : : "memory");
}
#endif
@@ -129,6 +133,7 @@ typedef struct DisasContext {
int cpl;
int iopl;
int tf; /* TF cpu flag */
+ TranslationBlock *tb;
} DisasContext;
/* i386 arith/logic operations */
@@ -192,6 +197,7 @@ enum {
typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
+typedef void (GenOpFunc3)(long, long, long);
static GenOpFunc *gen_op_mov_reg_T0[3][8] = {
[OT_BYTE] = {
@@ -699,18 +705,7 @@ enum {
JCC_LE,
};
-static GenOpFunc2 *gen_jcc_slow[8] = {
- gen_op_jo_cc,
- gen_op_jb_cc,
- gen_op_jz_cc,
- gen_op_jbe_cc,
- gen_op_js_cc,
- gen_op_jp_cc,
- gen_op_jl_cc,
- gen_op_jle_cc,
-};
-
-static GenOpFunc2 *gen_jcc_sub[3][8] = {
+static GenOpFunc3 *gen_jcc_sub[3][8] = {
[OT_BYTE] = {
NULL,
gen_op_jb_subb,
@@ -1090,8 +1085,9 @@ static inline uint32_t insn_get(DisasContext *s, int ot)
static inline void gen_jcc(DisasContext *s, int b, int val, int next_eip)
{
+ TranslationBlock *tb;
int inv, jcc_op;
- GenOpFunc2 *func;
+ GenOpFunc3 *func;
inv = b & 1;
jcc_op = (b >> 1) & 7;
@@ -1101,8 +1097,6 @@ static inline void gen_jcc(DisasContext *s, int b, int val, int next_eip)
case CC_OP_SUBW:
case CC_OP_SUBL:
func = gen_jcc_sub[s->cc_op - CC_OP_SUBB][jcc_op];
- if (!func)
- goto slow_jcc;
break;
/* some jumps are easy to compute */
@@ -1138,21 +1132,30 @@ static inline void gen_jcc(DisasContext *s, int b, int val, int next_eip)
func = gen_jcc_sub[(s->cc_op - CC_OP_ADDB) % 3][jcc_op];
break;
default:
- goto slow_jcc;
+ func = NULL;
+ break;
}
break;
default:
- slow_jcc:
- if (s->cc_op != CC_OP_DYNAMIC)
- gen_op_set_cc_op(s->cc_op);
- func = gen_jcc_slow[jcc_op];
+ func = NULL;
break;
}
+
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+
+ if (!func) {
+ gen_setcc_slow[jcc_op]();
+ func = gen_op_jcc;
+ }
+
+ tb = s->tb;
if (!inv) {
- func(val, next_eip);
+ func((long)tb, val, next_eip);
} else {
- func(next_eip, val);
+ func((long)tb, next_eip, val);
}
+ s->is_jmp = 3;
}
static void gen_setcc(DisasContext *s, int b)
@@ -1372,6 +1375,18 @@ static void gen_exception(DisasContext *s, int trapno, unsigned int cur_eip)
s->is_jmp = 1;
}
+/* generate a jump to eip. No segment change must happen before, as a
+ direct call to the next block may occur */
+static void gen_jmp(DisasContext *s, unsigned int eip)
+{
+ TranslationBlock *tb = s->tb;
+
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp_tb_next((long)tb, eip);
+ s->is_jmp = 3;
+}
+
/* return the next pc address. Return -1 if no insn found. *is_jmp_ptr
is set to true if the instruction sets the PC (last instruction of
a basic block) */
@@ -2964,8 +2979,7 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
val &= 0xffff;
gen_op_movl_T0_im(next_eip);
gen_push_T0(s);
- gen_op_jmp_im(val);
- s->is_jmp = 1;
+ gen_jmp(s, val);
}
break;
case 0x9a: /* lcall im */
@@ -2996,8 +3010,7 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
val += s->pc - s->cs_base;
if (s->dflag == 0)
val = val & 0xffff;
- gen_op_jmp_im(val);
- s->is_jmp = 1;
+ gen_jmp(s, val);
break;
case 0xea: /* ljmp im */
{
@@ -3019,8 +3032,7 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
val += s->pc - s->cs_base;
if (s->dflag == 0)
val = val & 0xffff;
- gen_op_jmp_im(val);
- s->is_jmp = 1;
+ gen_jmp(s, val);
break;
case 0x70 ... 0x7f: /* jcc Jb */
val = (int8_t)insn_get(s, OT_BYTE);
@@ -3037,7 +3049,6 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
if (s->dflag == 0)
val &= 0xffff;
gen_jcc(s, b, val, next_eip);
- s->is_jmp = 1;
break;
case 0x190 ... 0x19f: /* setcc Gv */
@@ -3393,15 +3404,6 @@ static uint16_t opc_read_flags[NB_OPS] = {
[INDEX_op_into] = CC_O,
- [INDEX_op_jo_cc] = CC_O,
- [INDEX_op_jb_cc] = CC_C,
- [INDEX_op_jz_cc] = CC_Z,
- [INDEX_op_jbe_cc] = CC_Z | CC_C,
- [INDEX_op_js_cc] = CC_S,
- [INDEX_op_jp_cc] = CC_P,
- [INDEX_op_jl_cc] = CC_O | CC_S,
- [INDEX_op_jle_cc] = CC_O | CC_S | CC_Z,
-
[INDEX_op_jb_subb] = CC_C,
[INDEX_op_jb_subw] = CC_C,
[INDEX_op_jb_subl] = CC_C,
@@ -3730,7 +3732,7 @@ static uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
int *gen_code_size_ptr,
uint8_t *pc_start, uint8_t *cs_base, int flags,
- int *code_size_ptr)
+ int *code_size_ptr, TranslationBlock *tb)
{
DisasContext dc1, *dc = &dc1;
uint8_t *pc_ptr;
@@ -3750,6 +3752,7 @@ int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
dc->tf = (flags >> GEN_FLAG_TF_SHIFT) & 1;
dc->cc_op = CC_OP_DYNAMIC;
dc->cs_base = cs_base;
+ dc->tb = tb;
gen_opc_ptr = gen_opc_buf;
gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
@@ -3776,15 +3779,21 @@ int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
(pc_ptr - pc_start) < (TARGET_PAGE_SIZE - 32));
/* we must store the eflags state if it is not already done */
- if (dc->cc_op != CC_OP_DYNAMIC)
- gen_op_set_cc_op(dc->cc_op);
- if (dc->is_jmp != 1) {
- /* we add an additionnal jmp to update the simulated PC */
- gen_op_jmp_im(ret - (unsigned long)dc->cs_base);
+ if (dc->is_jmp != 3) {
+ if (dc->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(dc->cc_op);
+ if (dc->is_jmp != 1) {
+ /* we add an additional jmp to update the simulated PC */
+ gen_op_jmp_im(ret - (unsigned long)dc->cs_base);
+ }
}
if (dc->tf) {
gen_op_raise_exception(EXCP01_SSTP);
}
+ if (dc->is_jmp != 3) {
+ /* indicate that the hash table must be used to find the next TB */
+ gen_op_movl_T0_0();
+ }
*gen_opc_ptr = INDEX_op_end;
@@ -3814,8 +3823,17 @@ int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
#endif
/* generate machine code */
- gen_code_size = dyngen_code(gen_code_buf, gen_opc_buf, gen_opparam_buf);
+ tb->tb_next_offset[0] = 0xffff;
+ tb->tb_next_offset[1] = 0xffff;
+ gen_code_size = dyngen_code(gen_code_buf, tb->tb_next_offset,
+#ifdef USE_DIRECT_JUMP
+ tb->tb_jmp_offset,
+#else
+ NULL,
+#endif
+ gen_opc_buf, gen_opparam_buf);
flush_icache_range((unsigned long)gen_code_buf, (unsigned long)(gen_code_buf + gen_code_size));
+
*gen_code_size_ptr = gen_code_size;
*code_size_ptr = pc_ptr - pc_start;
#ifdef DEBUG_DISAS