author     Lluís Vilanova <vilanova@ac.upc.edu>              2017-07-14 12:42:23 +0300
committer  Richard Henderson <richard.henderson@linaro.org>  2017-09-06 08:06:48 -0700
commit     70d3c035ae36a2c5c0f991ba958526127c92bb67 (patch)
tree       a8d007f58a9c0dfe9bc896e1ac307c0d975ff809 /target/arm
parent     24299c892cbfe29120f051b6b7d0bcf3e0cc8e85 (diff)
download   qemu-70d3c035ae36a2c5c0f991ba958526127c92bb67.tar.gz
target/arm: [tcg] Port to tb_stop
Incrementally paves the way towards using the generic instruction translation loop.

Reviewed-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu>
Message-Id: <150002534291.22386.13499916738708680298.stgit@frigg.lan>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Diffstat (limited to 'target/arm')
-rw-r--r--  target/arm/translate.c  161
1 file changed, 84 insertions, 77 deletions
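
For orientation: the per-target hooks visible in this diff (arm_tr_init_disas_context, arm_tr_tb_start, arm_tr_insn_start, arm_tr_breakpoint_check, arm_tr_translate_insn, and the arm_tr_tb_stop introduced here) are the pieces that this series eventually gathers into an ops table driven by a generic translation loop. The sketch below only illustrates that intended shape; the struct and field names mirror the TranslatorOps interface as later merged into include/exec/translator.h, the signatures are taken from the hooks as they appear in this patch, and both are an assumption about where the series is heading, not code contained in this commit.

/* Sketch only -- approximate shape of the generic translator interface this
 * series builds towards; names and signatures may differ from the interface
 * that was eventually merged. */
typedef struct TranslatorOps {
    int  (*init_disas_context)(DisasContextBase *db, CPUState *cpu,
                               int max_insns);
    void (*tb_start)(DisasContextBase *db, CPUState *cpu);
    void (*insn_start)(DisasContextBase *db, CPUState *cpu);
    bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu,
                             const CPUBreakpoint *bp);
    void (*translate_insn)(DisasContextBase *db, CPUState *cpu);
    void (*tb_stop)(DisasContextBase *db, CPUState *cpu);   /* hook ported here */
} TranslatorOps;

/* Once every hook is ported, the ARM gen_intermediate_code() reduces to
 * filling this table and handing it to the generic loop. */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
};
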
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 5737299943..10527b50c8 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -12057,85 +12057,13 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     dc->base.pc_next = dc->pc;
 }
-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
 {
-    DisasContext dc1, *dc = &dc1;
-    int max_insns;
-
-    /* generate intermediate code */
-
-    /* The A64 decoder has its own top level loop, because it doesn't need
-     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
-     */
-    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
-        gen_intermediate_code_a64(&dc->base, cs, tb);
-        return;
-    }
-
-    dc->base.tb = tb;
-    dc->base.pc_first = dc->base.tb->pc;
-    dc->base.pc_next = dc->base.pc_first;
-    dc->base.is_jmp = DISAS_NEXT;
-    dc->base.num_insns = 0;
-    dc->base.singlestep_enabled = cs->singlestep_enabled;
-
-    max_insns = tb->cflags & CF_COUNT_MASK;
-    if (max_insns == 0) {
-        max_insns = CF_COUNT_MASK;
-    }
-    if (max_insns > TCG_MAX_INSNS) {
-        max_insns = TCG_MAX_INSNS;
-    }
-    max_insns = arm_tr_init_disas_context(&dc->base, cs, max_insns);
-
-    gen_tb_start(tb);
-
-    tcg_clear_temp_count();
-    arm_tr_tb_start(&dc->base, cs);
-
-    do {
-        dc->base.num_insns++;
-        arm_tr_insn_start(&dc->base, cs);
-
-        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
-            CPUBreakpoint *bp;
-            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
-                if (bp->pc == dc->base.pc_next) {
-                    if (arm_tr_breakpoint_check(&dc->base, cs, bp)) {
-                        break;
-                    }
-                }
-            }
-            if (dc->base.is_jmp > DISAS_TOO_MANY) {
-                break;
-            }
-        }
-
-        if (dc->base.num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
-            gen_io_start();
-        }
-
-        arm_tr_translate_insn(&dc->base, cs);
-
-        if (tcg_check_temp_count()) {
-            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
-                    dc->pc);
-        }
-
-        if (!dc->base.is_jmp && (tcg_op_buf_full() || singlestep ||
-                                 dc->base.num_insns >= max_insns)) {
-            dc->base.is_jmp = DISAS_TOO_MANY;
-        }
-    } while (!dc->base.is_jmp);
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
-    if (tb->cflags & CF_LAST_IO) {
-        if (dc->condjmp) {
-            /* FIXME: This can theoretically happen with self-modifying
-               code. */
-            cpu_abort(cs, "IO on conditional branch instruction");
-        }
-        gen_io_end();
+    if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
+        /* FIXME: This can theoretically happen with self-modifying code. */
+        cpu_abort(cpu, "IO on conditional branch instruction");
     }
     /* At this stage dc->condjmp will only be set when the skipped
@@ -12241,6 +12169,85 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
             gen_goto_tb(dc, 1, dc->pc);
         }
     }
+}
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+{
+    DisasContext dc1, *dc = &dc1;
+    int max_insns;
+
+    /* generate intermediate code */
+
+    /* The A64 decoder has its own top level loop, because it doesn't need
+     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
+     */
+    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
+        gen_intermediate_code_a64(&dc->base, cs, tb);
+        return;
+    }
+
+    dc->base.tb = tb;
+    dc->base.pc_first = dc->base.tb->pc;
+    dc->base.pc_next = dc->base.pc_first;
+    dc->base.is_jmp = DISAS_NEXT;
+    dc->base.num_insns = 0;
+    dc->base.singlestep_enabled = cs->singlestep_enabled;
+
+    max_insns = tb->cflags & CF_COUNT_MASK;
+    if (max_insns == 0) {
+        max_insns = CF_COUNT_MASK;
+    }
+    if (max_insns > TCG_MAX_INSNS) {
+        max_insns = TCG_MAX_INSNS;
+    }
+    max_insns = arm_tr_init_disas_context(&dc->base, cs, max_insns);
+
+    gen_tb_start(tb);
+
+    tcg_clear_temp_count();
+    arm_tr_tb_start(&dc->base, cs);
+
+    do {
+        dc->base.num_insns++;
+        arm_tr_insn_start(&dc->base, cs);
+
+        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+            CPUBreakpoint *bp;
+            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+                if (bp->pc == dc->base.pc_next) {
+                    if (arm_tr_breakpoint_check(&dc->base, cs, bp)) {
+                        break;
+                    }
+                }
+            }
+            if (dc->base.is_jmp > DISAS_TOO_MANY) {
+                break;
+            }
+        }
+
+        if (dc->base.num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+            gen_io_start();
+        }
+
+        arm_tr_translate_insn(&dc->base, cs);
+
+        if (tcg_check_temp_count()) {
+            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
+                    dc->pc);
+        }
+
+        if (!dc->base.is_jmp && (tcg_op_buf_full() || singlestep ||
+                                 dc->base.num_insns >= max_insns)) {
+            dc->base.is_jmp = DISAS_TOO_MANY;
+        }
+    } while (!dc->base.is_jmp);
+
+    if (dc->base.tb->cflags & CF_LAST_IO) {
+        gen_io_end();
+    }
+
+    arm_tr_tb_stop(&dc->base, cs);
     gen_tb_end(tb, dc->base.num_insns);