author    Emilio G. Cota <cota@braap.org>  2017-07-11 14:29:37 -0400
committer Richard Henderson <richard.henderson@linaro.org>  2017-10-24 13:53:41 -0700
commit    4e2ca83e71b51577b06b1468e836556912bd5b6e (patch)
tree      b9fce2b5f36d6f171f8c80e932e25f9a25f1cd3e /accel
parent    e89b28a63501c0ad6d2501fe851d0c5202055e70 (diff)
tcg: define CF_PARALLEL and use it for TB hashing along with CF_COUNT_MASK
This will enable us to decouple code translation from the value of parallel_cpus at any given time. It will also help us minimize TB flushes when generating code via EXCP_ATOMIC.

Note that the declaration of parallel_cpus is moved to exec-all.h so that the curr_cflags inline can be defined there.

Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
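The header side of the patch is outside the accel/ diffstat below, so it does not appear on this page. Going by the subject line and the note above, the new exec-all.h definitions plausibly look like the sketch below; the exact CF_PARALLEL bit value is an assumption, and only its role in hashing alongside CF_COUNT_MASK is confirmed by this commit:

    /* Sketch of the include/exec/exec-all.h side of this patch (not in
     * the accel/-limited diffstat). The 0x100000 value is an assumption.
     */
    #define CF_PARALLEL  0x100000 /* generate code for a parallel context */
    /* cflags bits that participate in TB hashing and comparison */
    #define CF_HASH_MASK (CF_COUNT_MASK | CF_PARALLEL)

    extern bool parallel_cpus; /* declaration moved here, per the note above */

    /* cflags to request for a translation usable right now */
    static inline uint32_t curr_cflags(void)
    {
        return parallel_cpus ? CF_PARALLEL : 0;
    }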
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cpu-exec.c      | 45
-rw-r--r--  accel/tcg/tcg-runtime.c   |  2
-rw-r--r--  accel/tcg/translate-all.c | 13
3 files changed, 33 insertions(+), 27 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 363dfa208a..39ec9508d1 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -207,7 +207,8 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
tb_lock();
tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
max_cycles | CF_NOCACHE
- | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
+ | (ignore_icount ? CF_IGNORE_ICOUNT : 0)
+ | curr_cflags());
tb->orig_tb = orig_tb;
tb_unlock();
@@ -225,31 +226,27 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
static void cpu_exec_step(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
- CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
+ uint32_t cflags = 1 | CF_IGNORE_ICOUNT;
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
- mmap_lock();
- tb_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags,
- 1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
- tb->orig_tb = NULL;
- tb_unlock();
- mmap_unlock();
+ tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags,
+ cflags & CF_HASH_MASK);
+ if (tb == NULL) {
+ mmap_lock();
+ tb_lock();
+ tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+ tb_unlock();
+ mmap_unlock();
+ }
cc->cpu_exec_enter(cpu);
/* execute the generated code */
- trace_exec_tb_nocache(tb, pc);
+ trace_exec_tb(tb, pc);
cpu_tb_exec(cpu, tb);
cc->cpu_exec_exit(cpu);
-
- tb_lock();
- tb_phys_invalidate(tb, -1);
- tb_free(tb);
- tb_unlock();
} else {
/* We may have exited due to another problem here, so we need
* to reset any tb_locks we may have taken but didn't release.
@@ -281,6 +278,7 @@ struct tb_desc {
CPUArchState *env;
tb_page_addr_t phys_page1;
uint32_t flags;
+ uint32_t cf_mask;
uint32_t trace_vcpu_dstate;
};
@@ -294,7 +292,7 @@ static bool tb_cmp(const void *p, const void *d)
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
- !(atomic_read(&tb->cflags) & CF_INVALID)) {
+ (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
/* check next page if needed */
if (tb->page_addr[1] == -1) {
return true;
@@ -313,7 +311,8 @@ static bool tb_cmp(const void *p, const void *d)
}
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
- target_ulong cs_base, uint32_t flags)
+ target_ulong cs_base, uint32_t flags,
+ uint32_t cf_mask)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
@@ -322,11 +321,12 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
desc.env = (CPUArchState *)cpu->env_ptr;
desc.cs_base = cs_base;
desc.flags = flags;
+ desc.cf_mask = cf_mask;
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
- h = tb_hash_func(phys_pc, pc, flags, *cpu->trace_dstate);
+ h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}
@@ -373,8 +373,9 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
target_ulong cs_base, pc;
uint32_t flags;
bool acquired_tb_lock = false;
+ uint32_t cf_mask = curr_cflags();
- tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags);
+ tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
if (tb == NULL) {
/* mmap_lock is needed by tb_gen_code, and mmap_lock must be
* taken outside tb_lock. As system emulation is currently
@@ -387,10 +388,10 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
/* There's a chance that our desired tb has been translated while
* taking the locks so we check again inside the lock.
*/
- tb = tb_htable_lookup(cpu, pc, cs_base, flags);
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
if (likely(tb == NULL)) {
/* if no translated code available, then translate it now */
- tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
+ tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
}
mmap_unlock();
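
tb_lookup__cpu_state gains the same cf_mask parameter; its home, include/exec/tb-lookup.h, is also outside this diffstat. A sketch of how it would thread the mask through, mirroring the tb_cmp check in the hunk above (the tb_jmp_cache details are assumptions):

    /* Sketch only: fast-path lookup via cpu->tb_jmp_cache, falling back
     * to tb_htable_lookup(). The real helper also refills the jmp cache
     * on a hash-table hit; that is omitted here.
     */
    static inline TranslationBlock *
    tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc,
                         target_ulong *cs_base, uint32_t *flags,
                         uint32_t cf_mask)
    {
        CPUArchState *env = (CPUArchState *)cpu->env_ptr;
        TranslationBlock *tb;

        cpu_get_tb_cpu_state(env, pc, cs_base, flags);
        tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(*pc)]);
        if (likely(tb
                   && tb->pc == *pc
                   && tb->cs_base == *cs_base
                   && tb->flags == *flags
                   && tb->trace_vcpu_dstate == *cpu->trace_dstate
                   /* same predicate tb_cmp applies on the qht path */
                   && (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) {
            return tb;
        }
        return tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask);
    }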
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
index 54d89100d9..25f0cabfed 100644
--- a/accel/tcg/tcg-runtime.c
+++ b/accel/tcg/tcg-runtime.c
@@ -151,7 +151,7 @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env)
target_ulong cs_base, pc;
uint32_t flags;
- tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags);
+ tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags());
if (tb == NULL) {
return tcg_ctx.code_gen_epilogue;
}
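
The translate-all.c hunks below pass tb->cflags & CF_HASH_MASK into tb_hash_func. The matching include/exec/tb-hash.h change is likewise outside this diffstat; presumably the function simply folds the extra word into the existing hash, along these lines (the underlying hash helper name is an assumption):

    /* Sketch: tb_hash_func grows a cf_mask argument so that the hashed
     * cflags bits select the qht bucket together with pc/flags/dstate.
     */
    static inline
    uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc,
                          uint32_t flags, uint32_t cf_mask,
                          uint32_t trace_vcpu_dstate)
    {
        return tb_hash_func7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);
    }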
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 1b43deb0cd..7ad65bc705 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1101,7 +1101,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
+ tb->trace_vcpu_dstate);
qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
/* remove the TB from the page list */
@@ -1245,7 +1246,8 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
}
/* add in the hash table */
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
+ tb->trace_vcpu_dstate);
qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
#ifdef CONFIG_USER_ONLY
@@ -1548,7 +1550,8 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
- tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
+ tb_gen_code(cpu, current_pc, current_cs_base, current_flags,
+ 1 | curr_cflags());
cpu_loop_exit_noexc(cpu);
}
#endif
@@ -1666,7 +1669,8 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
- tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
+ tb_gen_code(cpu, current_pc, current_cs_base, current_flags,
+ 1 | curr_cflags());
/* tb_lock will be reset after cpu_loop_exit_noexc longjmps
* back into the cpu_exec loop. */
return true;
@@ -1810,6 +1814,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
}
cflags = n | CF_LAST_IO;
+ cflags |= curr_cflags();
pc = tb->pc;
cs_base = tb->cs_base;
flags = tb->flags;
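
Taken together: with cf_mask folded into both the hash and the tb_cmp comparison, a serial and a parallel translation of the same guest code can coexist in the hash table, which is what lets the EXCP_ATOMIC path above avoid TB flushes. An illustrative fragment, not from the patch:

    /* Illustrative only: the same guest location keys two distinct
     * entries depending on CF_PARALLEL, and even on a bucket collision
     * tb_cmp rejects a TB whose masked cflags differ from cf_mask.
     */
    static bool hashes_collide(tb_page_addr_t phys_pc, target_ulong pc,
                               uint32_t flags, uint32_t vcpu_dstate)
    {
        uint32_t h_serial   = tb_hash_func(phys_pc, pc, flags, 0, vcpu_dstate);
        uint32_t h_parallel = tb_hash_func(phys_pc, pc, flags, CF_PARALLEL,
                                           vcpu_dstate);
        return h_serial == h_parallel; /* almost always false */
    }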