author     Sergey Fedorov <serge.fdrv@gmail.com>   2016-04-13 22:40:18 +0300
committer  Richard Henderson <rth@twiddle.net>     2016-05-12 14:06:42 -1000
commit     6f789be56d3f38e9214dafcfab3bf9be7191f370 (patch)
tree       27f9f877f4c7758bccf296354527e895a7bdb569 /cpu-exec.c
parent     819af24b9c1e95e6576f1cefd32f4d6bf56dfa56 (diff)
tcg: Rework tb_invalidated_flag
'tb_invalidated_flag' was meant to catch two events:
 * some TB has been invalidated by tb_phys_invalidate();
 * the whole translation buffer has been flushed by tb_flush().

Then it was checked:
 * in cpu_exec() to ensure that the last executed TB can be safely
   linked to directly call the next one;
 * in cpu_exec_nocache() to decide if the original TB should be provided
   for further possible invalidation along with the temporarily
   generated TB.

It is always safe to patch an invalidated TB since it is not going to be
used anyway. It is also safe to call tb_phys_invalidate() for an already
invalidated TB. Thus, setting this flag in tb_phys_invalidate() is simply
unnecessary. Moreover, it can prevent proper linking of TBs when some
arbitrary TB has been invalidated. So just don't touch it in
tb_phys_invalidate().

Since this flag is now only used to catch whether tb_flush() has been
called, rename it to 'tb_flushed'. Declare it as 'bool' and stick to
using only 'true' and 'false' to set its value. Also, instead of setting
it in tb_gen_code() just after tb_flush() has been called, do it right
inside of tb_flush().

In cpu_exec(), this flag is used to track whether tb_flush() has been
called and has made 'last_tb' (a reference to the last executed TB)
unsafe to link directly to the next TB. tb_flush() can be called during
the CPU execution loop from tb_gen_code(), during TB execution, or by
another thread while 'tb_lock' is released. Catch translation buffer
flushes reliably by resetting this flag once before the first TB lookup
and each time we find it set before trying to add a direct jump. Don't
touch it in tb_find_physical().

Each vCPU has its own execution loop in multithreaded mode and thus
should have its own copy of the flag, so that it can reset it together
with its own 'last_tb' without affecting any other vCPU execution
thread. So make this flag per-vCPU and move it to CPUState.

In cpu_exec_nocache(), we only need to check whether tb_flush() has been
called from the tb_gen_code() invoked by cpu_exec_nocache() itself. To
do this reliably, preserve the old value of the flag, reset it before
calling tb_gen_code(), check it afterwards, and combine the saved value
back into the flag.

This patch is based on the patch "tcg: move tb_invalidated_flag to
CPUState" from Paolo Bonzini <pbonzini@redhat.com>.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
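The save/reset/check/combine handling of the flag in cpu_exec_nocache() is
easiest to see in isolation. Below is a minimal, self-contained C model of
the 'tb_flushed' protocol described above; the struct layouts, helper
signatures and driver are illustrative stand-ins rather than actual QEMU
code, and only the flag handling mirrors the patch.

/* Standalone model of the 'tb_flushed' protocol; not QEMU code. */
#include <stdbool.h>
#include <stdio.h>

typedef struct TranslationBlock {
    struct TranslationBlock *orig_tb;   /* stand-in for the real field */
} TranslationBlock;

typedef struct CPUState {
    bool tb_flushed;                    /* per-vCPU flag, as in the patch */
} CPUState;

/* Model of tb_flush(): the flush itself records that it happened. */
static void tb_flush(CPUState *cpu)
{
    /* ... the real function frees the whole translation buffer ... */
    cpu->tb_flushed = true;
}

/* Model of tb_gen_code(): may flush the buffer when it runs out of room. */
static TranslationBlock *tb_gen_code(CPUState *cpu, bool buffer_full)
{
    static TranslationBlock tb;
    if (buffer_full) {
        tb_flush(cpu);
    }
    tb.orig_tb = NULL;
    return &tb;
}

/* Mirrors the save/reset/check/combine pattern of cpu_exec_nocache(). */
static void exec_nocache_model(CPUState *cpu, TranslationBlock *orig_tb,
                               bool buffer_full)
{
    bool old_tb_flushed = cpu->tb_flushed;
    TranslationBlock *tb;

    cpu->tb_flushed = false;             /* observe only our own flush */
    tb = tb_gen_code(cpu, buffer_full);
    tb->orig_tb = cpu->tb_flushed ? NULL : orig_tb;
    cpu->tb_flushed |= old_tb_flushed;   /* merge the outer state back in */

    printf("flush during generation: %d, orig_tb kept: %s\n",
           buffer_full, tb->orig_tb ? "yes" : "no");
}

int main(void)
{
    CPUState cpu = { .tb_flushed = false };
    TranslationBlock orig = { .orig_tb = NULL };

    exec_nocache_model(&cpu, &orig, false);  /* orig_tb kept: yes */
    exec_nocache_model(&cpu, &orig, true);   /* orig_tb kept: no  */
    return 0;
}

In the model, as in the patch, a flush triggered while generating the
nocache TB clears 'orig_tb', while a flush that happened before entry is
preserved for the outer execution loop by the final OR.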
Diffstat (limited to 'cpu-exec.c')
-rw-r--r--   cpu-exec.c   21
1 file changed, 11 insertions, 10 deletions
diff --git a/cpu-exec.c b/cpu-exec.c
index 9407c66f62..f49a436e1a 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -202,16 +202,20 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                              TranslationBlock *orig_tb, bool ignore_icount)
 {
     TranslationBlock *tb;
+    bool old_tb_flushed;
 
     /* Should never happen.
        We only end up here when an existing TB is too long.  */
     if (max_cycles > CF_COUNT_MASK)
         max_cycles = CF_COUNT_MASK;
 
+    old_tb_flushed = cpu->tb_flushed;
+    cpu->tb_flushed = false;
     tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                      max_cycles | CF_NOCACHE
                          | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
-    tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
+    tb->orig_tb = cpu->tb_flushed ? NULL : orig_tb;
+    cpu->tb_flushed |= old_tb_flushed;
     cpu->current_tb = tb;
     /* execute the generated code */
     trace_exec_tb_nocache(tb, tb->pc);
@@ -232,8 +236,6 @@ static TranslationBlock *tb_find_physical(CPUState *cpu,
     unsigned int h;
     tb_page_addr_t phys_pc, phys_page1;
 
-    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
-
     /* find translated block using physical mappings */
     phys_pc = get_page_addr_code(env, pc);
     phys_page1 = phys_pc & TARGET_PAGE_MASK;
@@ -446,6 +448,7 @@ int cpu_exec(CPUState *cpu)
             }
 
             last_tb = NULL; /* forget the last executed TB after exception */
+            cpu->tb_flushed = false; /* reset before first TB lookup */
             for(;;) {
                 interrupt_request = cpu->interrupt_request;
                 if (unlikely(interrupt_request)) {
@@ -510,14 +513,12 @@ int cpu_exec(CPUState *cpu)
             }
             tb_lock();
             tb = tb_find_fast(cpu);
-            /* Note: we do it here to avoid a gcc bug on Mac OS X when
-               doing it in tb_find_slow */
-            if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
-                /* as some TB could have been invalidated because
-                   of memory exceptions while generating the code, we
-                   must recompute the hash index here */
+            if (cpu->tb_flushed) {
+                /* Ensure that no TB jump will be modified as the
+                 * translation buffer has been flushed.
+                 */
                 last_tb = NULL;
-                tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
+                cpu->tb_flushed = false;
             }
             /* See if we can patch the calling TB. */
             if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {