Diffstat (limited to 'target')
-rw-r--r--  target/alpha/cpu.c              3
-rw-r--r--  target/alpha/translate.c       14
-rw-r--r--  target/arm/cpu.c                8
-rw-r--r--  target/arm/helper-a64.c        38
-rw-r--r--  target/arm/helper-a64.h         4
-rw-r--r--  target/arm/op_helper.c          7
-rw-r--r--  target/arm/translate-a64.c     38
-rw-r--r--  target/arm/translate.c         19
-rw-r--r--  target/arm/translate.h          1
-rw-r--r--  target/cris/cpu.c              16
-rw-r--r--  target/cris/translate.c        15
-rw-r--r--  target/cris/translate_v10.c     2
-rw-r--r--  target/hppa/cpu.c               3
-rw-r--r--  target/hppa/helper.h            2
-rw-r--r--  target/hppa/op_helper.c        32
-rw-r--r--  target/hppa/translate.c        24
-rw-r--r--  target/i386/cpu.c               7
-rw-r--r--  target/i386/translate.c        67
-rw-r--r--  target/lm32/cpu.c               7
-rw-r--r--  target/lm32/translate.c        18
-rw-r--r--  target/m68k/cpu.c               7
-rw-r--r--  target/m68k/helper.h            1
-rw-r--r--  target/m68k/op_helper.c        33
-rw-r--r--  target/m68k/translate.c        25
-rw-r--r--  target/microblaze/cpu.c         7
-rw-r--r--  target/microblaze/translate.c  10
-rw-r--r--  target/mips/cpu.c               7
-rw-r--r--  target/mips/translate.c        37
-rw-r--r--  target/moxie/cpu.c              7
-rw-r--r--  target/moxie/translate.c       11
-rw-r--r--  target/nios2/cpu.c              7
-rw-r--r--  target/nios2/translate.c       10
-rw-r--r--  target/openrisc/cpu.c           7
-rw-r--r--  target/openrisc/translate.c     9
-rw-r--r--  target/ppc/translate.c         20
-rw-r--r--  target/ppc/translate_init.c    41
-rw-r--r--  target/s390x/cpu.c              9
-rw-r--r--  target/s390x/helper.h           4
-rw-r--r--  target/s390x/mem_helper.c      80
-rw-r--r--  target/s390x/translate.c       40
-rw-r--r--  target/sh4/cpu.c                5
-rw-r--r--  target/sh4/translate.c         19
-rw-r--r--  target/sparc/cpu.c              5
-rw-r--r--  target/sparc/cpu.h              2
-rw-r--r--  target/sparc/translate.c       36
-rw-r--r--  target/tilegx/cpu.c             7
-rw-r--r--  target/tilegx/translate.c       5
-rw-r--r--  target/tricore/cpu.c            5
-rw-r--r--  target/tricore/translate.c     11
-rw-r--r--  target/unicore32/cpu.c          7
-rw-r--r--  target/unicore32/translate.c   10
-rw-r--r--  target/xtensa/cpu.c             7
-rw-r--r--  target/xtensa/translate.c      31
53 files changed, 402 insertions, 445 deletions
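
The hunks below repeat two mechanical changes across every target: the per-instance "static bool inited" guards in *_cpu_initfn() are dropped in favour of a class hook (cc->tcg_initialize = ..._translate_init) that the core invokes exactly once, and translation-time checks of the mutable parallel_cpus global are replaced by reading the TB's own flags with tb_cflags(tb) & CF_PARALLEL, dispatching to dedicated *_parallel helper variants where atomicity matters. The following is a minimal, self-contained C sketch of that shape -- an illustrative model only; the types and function names are invented for the example and are not QEMU's real API.

#include <stdio.h>

/* Illustrative stand-ins only -- a simplified model, not QEMU's real types. */
typedef struct CPUClass {
    void (*tcg_initialize)(void);
} CPUClass;

#define CF_PARALLEL 0x1u

/* Translator global setup; registered once per class instead of being
   guarded by a per-instance "static bool inited" flag. */
static void demo_translate_init(void)
{
    puts("translator globals initialized once");
}

static void demo_cpu_class_init(CPUClass *cc)
{
    cc->tcg_initialize = demo_translate_init;   /* the core calls this exactly once */
}

/* Helper selection keyed off the TB's own cflags rather than a mutable
   global, mirroring the CF_PARALLEL checks in the hunks that follow. */
static void demo_gen_store_exclusive(unsigned tb_cflags)
{
    if (tb_cflags & CF_PARALLEL) {
        puts("emit call to gen_helper_..._parallel");
    } else {
        puts("emit call to gen_helper_...");
    }
}

int main(void)
{
    CPUClass cc = { 0 };

    demo_cpu_class_init(&cc);
    cc.tcg_initialize();

    demo_gen_store_exclusive(CF_PARALLEL);  /* TB translated for MTTCG */
    demo_gen_store_exclusive(0);            /* TB translated for serial execution */
    return 0;
}
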
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index b8a21f4e01..bc9520535b 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -260,8 +260,6 @@ static void alpha_cpu_initfn(Object *obj)
cs->env_ptr = env;
tlb_flush(cs);
- alpha_translate_init();
-
env->lock_addr = -1;
#if defined(CONFIG_USER_ONLY)
env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN;
@@ -299,6 +297,7 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
dc->vmsd = &vmstate_alpha_cpu;
#endif
cc->disas_set_info = alpha_cpu_disas_set_info;
+ cc->tcg_initialize = alpha_translate_init;
cc->gdb_num_core_regs = 67;
}
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index 3de369b17e..629f35ec8e 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -78,7 +78,6 @@ struct DisasContext {
#define DISAS_PC_STALE DISAS_TARGET_2
/* global register indexes */
-static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
@@ -124,17 +123,8 @@ void alpha_translate_init(void)
};
#endif
- static bool done_init = 0;
int i;
- if (done_init) {
- return;
- }
- done_init = 1;
-
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
-
for (i = 0; i < 31; i++) {
cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUAlphaState, ir[i]),
@@ -461,7 +451,7 @@ static bool in_superpage(DisasContext *ctx, int64_t addr)
static bool use_exit_tb(DisasContext *ctx)
{
- return ((ctx->base.tb->cflags & CF_LAST_IO)
+ return ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
|| ctx->base.singlestep_enabled
|| singlestep);
}
@@ -2405,7 +2395,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0xC000:
/* RPCC */
va = dest_gpr(ctx, ra);
- if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
gen_helper_load_pcc(va, cpu_env);
gen_io_end();
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index a0ed11c9a5..47c8b2a85c 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -535,7 +535,6 @@ static void arm_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
ARMCPU *cpu = ARM_CPU(obj);
- static bool inited;
cs->env_ptr = &cpu->env;
cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
@@ -579,10 +578,6 @@ static void arm_cpu_initfn(Object *obj)
if (tcg_enabled()) {
cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
- if (!inited) {
- inited = true;
- arm_translate_init();
- }
}
}
@@ -1766,6 +1761,9 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
#endif
cc->disas_set_info = arm_disas_set_info;
+#ifdef CONFIG_TCG
+ cc->tcg_initialize = arm_translate_init;
+#endif
}
static void cpu_register(const ARMCPUInfo *info)
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index d9df82cff5..d0e435ca4b 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -430,8 +430,9 @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
}
/* Returns 0 on success; 1 otherwise. */
-uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
- uint64_t new_lo, uint64_t new_hi)
+static uint64_t do_paired_cmpxchg64_le(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi,
+ bool parallel)
{
uintptr_t ra = GETPC();
Int128 oldv, cmpv, newv;
@@ -440,7 +441,7 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
newv = int128_make128(new_lo, new_hi);
- if (parallel_cpus) {
+ if (parallel) {
#ifndef CONFIG_ATOMIC128
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
@@ -484,8 +485,21 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
return !success;
}
-uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
- uint64_t new_lo, uint64_t new_hi)
+uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi)
+{
+ return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, false);
+}
+
+uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi)
+{
+ return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, true);
+}
+
+static uint64_t do_paired_cmpxchg64_be(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi,
+ bool parallel)
{
uintptr_t ra = GETPC();
Int128 oldv, cmpv, newv;
@@ -494,7 +508,7 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
newv = int128_make128(new_lo, new_hi);
- if (parallel_cpus) {
+ if (parallel) {
#ifndef CONFIG_ATOMIC128
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
@@ -537,3 +551,15 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
return !success;
}
+
+uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi)
+{
+ return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, false);
+}
+
+uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi)
+{
+ return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, true);
+}
diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
index 6f9eaba533..85d86741db 100644
--- a/target/arm/helper-a64.h
+++ b/target/arm/helper-a64.h
@@ -43,4 +43,8 @@ DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env)
DEF_HELPER_FLAGS_3(crc32_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
DEF_HELPER_FLAGS_3(crc32c_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
DEF_HELPER_FLAGS_4(paired_cmpxchg64_le, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(paired_cmpxchg64_le_parallel, TCG_CALL_NO_WG,
+ i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(paired_cmpxchg64_be, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(paired_cmpxchg64_be_parallel, TCG_CALL_NO_WG,
+ i64, env, i64, i64, i64)
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 3914145709..138d0df82f 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -502,13 +502,6 @@ void HELPER(yield)(CPUARMState *env)
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- /* When running in MTTCG we don't generate jumps to the yield and
- * WFE helpers as it won't affect the scheduling of other vCPUs.
- * If we wanted to more completely model WFE/SEV so we don't busy
- * spin unnecessarily we would need to do something more involved.
- */
- g_assert(!parallel_cpus);
-
/* This is a non-trappable hint instruction that generally indicates
* that the guest is currently busy-looping. Yield control back to the
* top level loop so that a more deserving VCPU has a chance to run.
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index fc5419df7f..e98fbcf261 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -348,7 +348,8 @@ static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
/* No direct tb linking with singlestep (either QEMU's or the ARM
* debug architecture kind) or deterministic io
*/
- if (s->base.singlestep_enabled || s->ss_active || (s->base.tb->cflags & CF_LAST_IO)) {
+ if (s->base.singlestep_enabled || s->ss_active ||
+ (tb_cflags(s->base.tb) & CF_LAST_IO)) {
return false;
}
@@ -1335,13 +1336,18 @@ static void handle_hint(DisasContext *s, uint32_t insn,
case 3: /* WFI */
s->base.is_jmp = DISAS_WFI;
return;
+ /* When running in MTTCG we don't generate jumps to the yield and
+ * WFE helpers as it won't affect the scheduling of other vCPUs.
+ * If we wanted to more completely model WFE/SEV so we don't busy
+ * spin unnecessarily we would need to do something more involved.
+ */
case 1: /* YIELD */
- if (!parallel_cpus) {
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
s->base.is_jmp = DISAS_YIELD;
}
return;
case 2: /* WFE */
- if (!parallel_cpus) {
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
s->base.is_jmp = DISAS_WFE;
}
return;
@@ -1561,7 +1567,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
break;
}
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
gen_io_start();
}
@@ -1592,7 +1598,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
}
}
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
gen_io_end();
s->base.is_jmp = DISAS_UPDATE;
@@ -1930,11 +1936,25 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
MO_64 | MO_ALIGN | s->be_data);
tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
} else if (s->be_data == MO_LE) {
- gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
- cpu_reg(s, rt), cpu_reg(s, rt2));
+ if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
+ cpu_exclusive_addr,
+ cpu_reg(s, rt),
+ cpu_reg(s, rt2));
+ } else {
+ gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
+ cpu_reg(s, rt), cpu_reg(s, rt2));
+ }
} else {
- gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
- cpu_reg(s, rt), cpu_reg(s, rt2));
+ if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
+ cpu_exclusive_addr,
+ cpu_reg(s, rt),
+ cpu_reg(s, rt2));
+ } else {
+ gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
+ cpu_reg(s, rt), cpu_reg(s, rt2));
+ }
}
} else {
tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 9d31769c8d..6ba4ae92dc 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -58,7 +58,6 @@
#define IS_USER(s) (s->user)
#endif
-TCGv_env cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
@@ -81,9 +80,6 @@ void arm_translate_init(void)
{
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
-
for (i = 0; i < 16; i++) {
cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUARMState, regs[i]),
@@ -4546,8 +4542,13 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
static void gen_nop_hint(DisasContext *s, int val)
{
switch (val) {
+ /* When running in MTTCG we don't generate jumps to the yield and
+ * WFE helpers as it won't affect the scheduling of other vCPUs.
+ * If we wanted to more completely model WFE/SEV so we don't busy
+ * spin unnecessarily we would need to do something more involved.
+ */
case 1: /* yield */
- if (!parallel_cpus) {
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
gen_set_pc_im(s, s->pc);
s->base.is_jmp = DISAS_YIELD;
}
@@ -4557,7 +4558,7 @@ static void gen_nop_hint(DisasContext *s, int val)
s->base.is_jmp = DISAS_WFI;
break;
case 2: /* wfe */
- if (!parallel_cpus) {
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
gen_set_pc_im(s, s->pc);
s->base.is_jmp = DISAS_WFE;
}
@@ -7704,7 +7705,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
break;
}
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
gen_io_start();
}
@@ -7795,7 +7796,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
}
}
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
gen_io_end();
gen_lookup_tb(s);
@@ -12253,7 +12254,7 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
+ if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
/* FIXME: This can theoretically happen with self-modifying code. */
cpu_abort(cpu, "IO on conditional branch instruction");
}
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 3c96aec956..410ba79c0d 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -80,7 +80,6 @@ typedef struct DisasCompare {
} DisasCompare;
/* Share the TCG temporaries common between 32 and 64 bit modes. */
-extern TCGv_env cpu_env;
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
extern TCGv_i64 cpu_exclusive_val;
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index 88d93f2d11..527a3448bf 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -181,7 +181,6 @@ static void cris_cpu_initfn(Object *obj)
CRISCPU *cpu = CRIS_CPU(obj);
CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj);
CPUCRISState *env = &cpu->env;
- static bool tcg_initialized;
cs->env_ptr = env;
@@ -191,15 +190,6 @@ static void cris_cpu_initfn(Object *obj)
/* IRQ and NMI lines. */
qdev_init_gpio_in(DEVICE(cpu), cris_cpu_set_irq, 2);
#endif
-
- if (tcg_enabled() && !tcg_initialized) {
- tcg_initialized = true;
- if (env->pregs[PR_VR] < 32) {
- cris_initialize_crisv10_tcg();
- } else {
- cris_initialize_tcg();
- }
- }
}
static void crisv8_cpu_class_init(ObjectClass *oc, void *data)
@@ -210,6 +200,7 @@ static void crisv8_cpu_class_init(ObjectClass *oc, void *data)
ccc->vr = 8;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+ cc->tcg_initialize = cris_initialize_crisv10_tcg;
}
static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
@@ -220,6 +211,7 @@ static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
ccc->vr = 9;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+ cc->tcg_initialize = cris_initialize_crisv10_tcg;
}
static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
@@ -230,6 +222,7 @@ static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
ccc->vr = 10;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+ cc->tcg_initialize = cris_initialize_crisv10_tcg;
}
static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
@@ -240,6 +233,7 @@ static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
ccc->vr = 11;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+ cc->tcg_initialize = cris_initialize_crisv10_tcg;
}
static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
@@ -250,6 +244,7 @@ static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
ccc->vr = 17;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+ cc->tcg_initialize = cris_initialize_crisv10_tcg;
}
static void crisv32_cpu_class_init(ObjectClass *oc, void *data)
@@ -322,6 +317,7 @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = cris_disas_set_info;
+ cc->tcg_initialize = cris_initialize_tcg;
}
static const TypeInfo cris_cpu_type_info = {
diff --git a/target/cris/translate.c b/target/cris/translate.c
index b1fda57c74..2831419845 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -66,7 +66,6 @@
#define CC_MASK_NZVC 0xf
#define CC_MASK_RNZV 0x10e
-static TCGv_env cpu_env;
static TCGv cpu_R[16];
static TCGv cpu_PR[16];
static TCGv cc_x;
@@ -839,7 +838,7 @@ static void cris_alu(DisasContext *dc, int op,
}
tcg_gen_or_tl(d, d, tmp);
}
- if (!TCGV_EQUAL(tmp, d)) {
+ if (tmp != d) {
tcg_temp_free(tmp);
}
}
@@ -1162,7 +1161,7 @@ static inline void t_gen_sext(TCGv d, TCGv s, int size)
tcg_gen_ext8s_i32(d, s);
} else if (size == 2) {
tcg_gen_ext16s_i32(d, s);
- } else if (!TCGV_EQUAL(d, s)) {
+ } else {
tcg_gen_mov_tl(d, s);
}
}
@@ -1173,7 +1172,7 @@ static inline void t_gen_zext(TCGv d, TCGv s, int size)
tcg_gen_ext8u_i32(d, s);
} else if (size == 2) {
tcg_gen_ext16u_i32(d, s);
- } else if (!TCGV_EQUAL(d, s)) {
+ } else {
tcg_gen_mov_tl(d, s);
}
}
@@ -3141,7 +3140,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -3171,7 +3170,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
dc->clear_x = 1;
@@ -3244,7 +3243,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
npc = dc->pc;
- if (tb->cflags & CF_LAST_IO)
+ if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end();
/* Force an update if the per-tb cpu state has changed. */
if (dc->is_jmp == DISAS_NEXT
@@ -3367,8 +3366,6 @@ void cris_initialize_tcg(void)
{
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
cc_x = tcg_global_mem_new(cpu_env,
offsetof(CPUCRISState, cc_x), "cc_x");
cc_src = tcg_global_mem_new(cpu_env,
diff --git a/target/cris/translate_v10.c b/target/cris/translate_v10.c
index 4a0b485d8e..fce78825cc 100644
--- a/target/cris/translate_v10.c
+++ b/target/cris/translate_v10.c
@@ -1272,8 +1272,6 @@ void cris_initialize_crisv10_tcg(void)
{
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
cc_x = tcg_global_mem_new(cpu_env,
offsetof(CPUCRISState, cc_x), "cc_x");
cc_src = tcg_global_mem_new(cpu_env,
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index a477b452f0..9e7b0d4ccb 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -108,8 +108,6 @@ static void hppa_cpu_initfn(Object *obj)
cs->env_ptr = env;
cpu_hppa_loaded_fr0(env);
set_snan_bit_is_one(true, &env->fp_status);
-
- hppa_translate_init();
}
static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
@@ -136,6 +134,7 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = hppa_cpu_gdb_write_register;
cc->handle_mmu_fault = hppa_cpu_handle_mmu_fault;
cc->disas_set_info = hppa_cpu_disas_set_info;
+ cc->tcg_initialize = hppa_translate_init;
cc->gdb_num_core_regs = 128;
}
diff --git a/target/hppa/helper.h b/target/hppa/helper.h
index 789f07fc0a..0a6b900555 100644
--- a/target/hppa/helper.h
+++ b/target/hppa/helper.h
@@ -3,7 +3,9 @@ DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tl)
DEF_HELPER_FLAGS_2(tcond, TCG_CALL_NO_WG, void, env, tl)
DEF_HELPER_FLAGS_3(stby_b, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tl)
DEF_HELPER_FLAGS_3(stby_e, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tl)
DEF_HELPER_FLAGS_1(probe_r, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_1(probe_w, TCG_CALL_NO_RWG_SE, tl, tl)
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
index c05c0d5572..3104404e8d 100644
--- a/target/hppa/op_helper.c
+++ b/target/hppa/op_helper.c
@@ -76,7 +76,8 @@ static void atomic_store_3(CPUHPPAState *env, target_ulong addr, uint32_t val,
#endif
}
-void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
+ bool parallel)
{
uintptr_t ra = GETPC();
@@ -89,7 +90,7 @@ void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
break;
case 1:
/* The 3 byte store must appear atomic. */
- if (parallel_cpus) {
+ if (parallel) {
atomic_store_3(env, addr, val, 0x00ffffffu, ra);
} else {
cpu_stb_data_ra(env, addr, val >> 16, ra);
@@ -102,14 +103,26 @@ void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
}
}
-void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+{
+ do_stby_b(env, addr, val, false);
+}
+
+void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr,
+ target_ulong val)
+{
+ do_stby_b(env, addr, val, true);
+}
+
+static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
+ bool parallel)
{
uintptr_t ra = GETPC();
switch (addr & 3) {
case 3:
/* The 3 byte store must appear atomic. */
- if (parallel_cpus) {
+ if (parallel) {
atomic_store_3(env, addr - 3, val, 0xffffff00u, ra);
} else {
cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
@@ -132,6 +145,17 @@ void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
}
}
+void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+{
+ do_stby_e(env, addr, val, false);
+}
+
+void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
+ target_ulong val)
+{
+ do_stby_e(env, addr, val, true);
+}
+
target_ulong HELPER(probe_r)(target_ulong addr)
{
return page_check_range(addr, 1, PAGE_READ);
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index ca6a6d3372..53aa1f88c4 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -83,7 +83,6 @@ typedef struct DisasInsn {
} DisasInsn;
/* global register indexes */
-static TCGv_env cpu_env;
static TCGv cpu_gr[32];
static TCGv cpu_iaoq_f;
static TCGv cpu_iaoq_b;
@@ -124,17 +123,8 @@ void hppa_translate_init(void)
"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
};
- static bool done_init = 0;
int i;
- if (done_init) {
- return;
- }
- done_init = 1;
-
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
-
TCGV_UNUSED(cpu_gr[0]);
for (i = 1; i < 32; i++) {
cpu_gr[i] = tcg_global_mem_new(cpu_env,
@@ -475,7 +465,7 @@ static DisasJumpType gen_illegal(DisasContext *ctx)
static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
/* Suppress goto_tb in the case of single-steping and IO. */
- if ((ctx->base.tb->cflags & CF_LAST_IO) || ctx->base.singlestep_enabled) {
+ if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
return false;
}
return true;
@@ -2297,9 +2287,17 @@ static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
val = load_gpr(ctx, rt);
if (a) {
- gen_helper_stby_e(cpu_env, addr, val);
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ gen_helper_stby_e_parallel(cpu_env, addr, val);
+ } else {
+ gen_helper_stby_e(cpu_env, addr, val);
+ }
} else {
- gen_helper_stby_b(cpu_env, addr, val);
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ gen_helper_stby_b_parallel(cpu_env, addr, val);
+ } else {
+ gen_helper_stby_b(cpu_env, addr, val);
+ }
}
if (m) {
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index cf890b763b..6f21a5e518 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -3721,10 +3721,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
return;
}
- if (tcg_enabled()) {
- tcg_x86_init();
- }
-
#ifndef CONFIG_USER_ONLY
qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
@@ -4234,6 +4230,9 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
#endif
cc->cpu_exec_enter = x86_cpu_exec_enter;
cc->cpu_exec_exit = x86_cpu_exec_exit;
+#ifdef CONFIG_TCG
+ cc->tcg_initialize = tcg_x86_init;
+#endif
cc->disas_set_info = x86_disas_set_info;
dc->user_creatable = true;
diff --git a/target/i386/translate.c b/target/i386/translate.c
index e81479a50c..088a9d9766 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -72,7 +72,6 @@
//#define MACRO_TEST 1
/* global register indexes */
-static TCGv_env cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
@@ -742,7 +741,7 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
size = s->cc_op - CC_OP_SUBB;
t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
/* If no temporary was used, be careful not to alias t1 and t0. */
- t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
+ t0 = t1 == cpu_cc_src ? cpu_tmp0 : reg;
tcg_gen_mov_tl(t0, cpu_cc_srcT);
gen_extu(size, t0);
goto add_sub;
@@ -951,7 +950,7 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
break;
case JCC_L:
gen_compute_eflags(s);
- if (TCGV_EQUAL(reg, cpu_cc_src)) {
+ if (reg == cpu_cc_src) {
reg = cpu_tmp0;
}
tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
@@ -962,7 +961,7 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
default:
case JCC_LE:
gen_compute_eflags(s);
- if (TCGV_EQUAL(reg, cpu_cc_src)) {
+ if (reg == cpu_cc_src) {
reg = cpu_tmp0;
}
tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
@@ -1118,7 +1117,7 @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_string_movl_A0_EDI(s);
@@ -1133,14 +1132,14 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot)
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_EDI);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
}
}
static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_string_movl_A0_ESI(s);
@@ -1153,7 +1152,7 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot)
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_ESI);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
}
}
@@ -5307,7 +5306,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
goto illegal_op;
gen_lea_modrm(env, s, modrm);
- if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
+ if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) {
gen_helper_cmpxchg16b(cpu_env, cpu_A0);
} else {
gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0);
@@ -5318,7 +5317,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
if (!(s->cpuid_features & CPUID_CX8))
goto illegal_op;
gen_lea_modrm(env, s, modrm);
- if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
+ if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) {
gen_helper_cmpxchg8b(cpu_env, cpu_A0);
} else {
gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0);
@@ -6340,7 +6339,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_ins(s, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
@@ -6355,7 +6354,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_outs(s, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
@@ -6371,14 +6370,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
@@ -6392,14 +6391,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
@@ -6410,14 +6409,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
@@ -6430,14 +6429,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
@@ -7143,11 +7142,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x131: /* rdtsc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtsc(cpu_env);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
@@ -7602,11 +7601,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtscp(cpu_env);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
@@ -7971,24 +7970,24 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_op_mov_v_reg(ot, cpu_T0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
cpu_T0);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
}
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
gen_op_mov_reg_v(ot, rm, cpu_T0);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
}
}
@@ -8366,15 +8365,7 @@ void tcg_x86_init(void)
"bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
};
int i;
- static bool initialized;
- if (initialized) {
- return;
- }
- initialized = true;
-
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUX86State, cc_op), "cc_op");
cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
@@ -8458,7 +8449,7 @@ static int i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu,
record/replay modes and there will always be an
additional step for ecx=0 when icount is enabled.
*/
- dc->repz_opt = !dc->jmp_opt && !(dc->base.tb->cflags & CF_USE_ICOUNT);
+ dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
#if 0
/* check addseg logic */
if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
@@ -8524,7 +8515,7 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
the flag and abort the translation to give the irqs a
chance to happen */
dc->base.is_jmp = DISAS_TOO_MANY;
- } else if ((dc->base.tb->cflags & CF_USE_ICOUNT)
+ } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
&& ((dc->base.pc_next & TARGET_PAGE_MASK)
!= ((dc->base.pc_next + TARGET_MAX_INSN_SIZE - 1)
& TARGET_PAGE_MASK)
diff --git a/target/lm32/cpu.c b/target/lm32/cpu.c
index bf081f56d2..7f3a292f2b 100644
--- a/target/lm32/cpu.c
+++ b/target/lm32/cpu.c
@@ -163,16 +163,10 @@ static void lm32_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj);
LM32CPU *cpu = LM32_CPU(obj);
CPULM32State *env = &cpu->env;
- static bool tcg_initialized;
cs->env_ptr = env;
env->flags = 0;
-
- if (tcg_enabled() && !tcg_initialized) {
- tcg_initialized = true;
- lm32_translate_init();
- }
}
static void lm32_basic_cpu_initfn(Object *obj)
@@ -286,6 +280,7 @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_stop_before_watchpoint = true;
cc->debug_excp_handler = lm32_debug_excp_handler;
cc->disas_set_info = lm32_cpu_disas_set_info;
+ cc->tcg_initialize = lm32_translate_init;
}
static void lm32_register_cpu_type(const LM32CPUInfo *info)
diff --git a/target/lm32/translate.c b/target/lm32/translate.c
index a83cbdf729..b8b2b13e36 100644
--- a/target/lm32/translate.c
+++ b/target/lm32/translate.c
@@ -53,7 +53,6 @@
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
-static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_pc;
static TCGv cpu_ie;
@@ -880,24 +879,24 @@ static void dec_wcsr(DisasContext *dc)
break;
case CSR_IM:
/* mark as an io operation because it could cause an interrupt */
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
dc->is_jmp = DISAS_UPDATE;
break;
case CSR_IP:
/* mark as an io operation because it could cause an interrupt */
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
dc->is_jmp = DISAS_UPDATE;
@@ -1078,7 +1077,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -1106,7 +1105,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
@@ -1119,7 +1118,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
&& (dc->pc < next_page_start)
&& num_insns < max_insns);
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
@@ -1208,9 +1207,6 @@ void lm32_translate_init(void)
{
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
-
for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
cpu_R[i] = tcg_global_mem_new(cpu_env,
offsetof(CPULM32State, regs[i]),
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index 8c70e0805c..5da19e570b 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -247,14 +247,8 @@ static void m68k_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj);
M68kCPU *cpu = M68K_CPU(obj);
CPUM68KState *env = &cpu->env;
- static bool inited;
cs->env_ptr = env;
-
- if (tcg_enabled() && !inited) {
- inited = true;
- m68k_tcg_init();
- }
}
static const VMStateDescription vmstate_m68k_cpu = {
@@ -288,6 +282,7 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
#endif
cc->disas_set_info = m68k_cpu_disas_set_info;
+ cc->tcg_initialize = m68k_tcg_init;
cc->gdb_num_core_regs = 18;
cc->gdb_core_xml_file = "cf-core.xml";
diff --git a/target/m68k/helper.h b/target/m68k/helper.h
index 475a1f2186..eebe52dae5 100644
--- a/target/m68k/helper.h
+++ b/target/m68k/helper.h
@@ -11,6 +11,7 @@ DEF_HELPER_2(set_sr, void, env, i32)
DEF_HELPER_3(movec, void, env, i32, i32)
DEF_HELPER_4(cas2w, void, env, i32, i32, i32)
DEF_HELPER_4(cas2l, void, env, i32, i32, i32)
+DEF_HELPER_4(cas2l_parallel, void, env, i32, i32, i32)
#define dh_alias_fp ptr
#define dh_ctype_fp FPReg *
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index 7b5126c88d..63089511cb 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -361,6 +361,7 @@ void HELPER(divsll)(CPUM68KState *env, int numr, int regr, int32_t den)
env->dregs[numr] = quot;
}
+/* We're executing in a serial context -- no need to be atomic. */
void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
{
uint32_t Dc1 = extract32(regs, 9, 3);
@@ -374,17 +375,11 @@ void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
int16_t l1, l2;
uintptr_t ra = GETPC();
- if (parallel_cpus) {
- /* Tell the main loop we need to serialize this insn. */
- cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
- } else {
- /* We're executing in a serial context -- no need to be atomic. */
- l1 = cpu_lduw_data_ra(env, a1, ra);
- l2 = cpu_lduw_data_ra(env, a2, ra);
- if (l1 == c1 && l2 == c2) {
- cpu_stw_data_ra(env, a1, u1, ra);
- cpu_stw_data_ra(env, a2, u2, ra);
- }
+ l1 = cpu_lduw_data_ra(env, a1, ra);
+ l2 = cpu_lduw_data_ra(env, a2, ra);
+ if (l1 == c1 && l2 == c2) {
+ cpu_stw_data_ra(env, a1, u1, ra);
+ cpu_stw_data_ra(env, a2, u2, ra);
}
if (c1 != l1) {
@@ -399,7 +394,8 @@ void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
env->dregs[Dc2] = deposit32(env->dregs[Dc2], 0, 16, l2);
}
-void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
+static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
+ bool parallel)
{
uint32_t Dc1 = extract32(regs, 9, 3);
uint32_t Dc2 = extract32(regs, 6, 3);
@@ -416,7 +412,7 @@ void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
TCGMemOpIdx oi;
#endif
- if (parallel_cpus) {
+ if (parallel) {
/* We're executing in a parallel context -- must be atomic. */
#ifdef CONFIG_ATOMIC64
uint64_t c, u, l;
@@ -470,6 +466,17 @@ void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
env->dregs[Dc2] = l2;
}
+void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
+{
+ do_cas2l(env, regs, a1, a2, false);
+}
+
+void HELPER(cas2l_parallel)(CPUM68KState *env, uint32_t regs, uint32_t a1,
+ uint32_t a2)
+{
+ do_cas2l(env, regs, a1, a2, true);
+}
+
struct bf_data {
uint32_t addr;
uint32_t bofs;
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index e1e31f622c..b60909222c 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -44,8 +44,6 @@
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;
-static TCGv_env cpu_env;
-
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
@@ -58,7 +56,7 @@ static TCGv_i64 cpu_macc[4];
#define QREG_SP get_areg(s, 7)
static TCGv NULL_QREG;
-#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
+#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes. */
static TCGv store_dummy;
@@ -69,9 +67,6 @@ void m68k_tcg_init(void)
char *p;
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
-
#define DEFO32(name, offset) \
QREG_##name = tcg_global_mem_new_i32(cpu_env, \
offsetof(CPUM68KState, offset), #name);
@@ -2312,7 +2307,11 @@ DISAS_INSN(cas2w)
(REG(ext1, 6) << 3) |
(REG(ext2, 0) << 6) |
(REG(ext1, 0) << 9));
- gen_helper_cas2w(cpu_env, regs, addr1, addr2);
+ if (tb_cflags(s->tb) & CF_PARALLEL) {
+ gen_helper_exit_atomic(cpu_env);
+ } else {
+ gen_helper_cas2w(cpu_env, regs, addr1, addr2);
+ }
tcg_temp_free(regs);
/* Note that cas2w also assigned to env->cc_op. */
@@ -2358,7 +2357,11 @@ DISAS_INSN(cas2l)
(REG(ext1, 6) << 3) |
(REG(ext2, 0) << 6) |
(REG(ext1, 0) << 9));
- gen_helper_cas2l(cpu_env, regs, addr1, addr2);
+ if (tb_cflags(s->tb) & CF_PARALLEL) {
+ gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
+ } else {
+ gen_helper_cas2l(cpu_env, regs, addr1, addr2);
+ }
tcg_temp_free(regs);
/* Note that cas2l also assigned to env->cc_op. */
@@ -5547,7 +5550,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
dc->done_mac = 0;
dc->writeback_mask = 0;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -5573,7 +5576,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
break;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
@@ -5585,7 +5588,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
(pc_offset) < (TARGET_PAGE_SIZE - 32) &&
num_insns < max_insns);
- if (tb->cflags & CF_LAST_IO)
+ if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end();
if (unlikely(cs->singlestep_enabled)) {
/* Make sure the pc is updated, and raise a debug exception. */
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index ddffe86e9b..5700652e06 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -205,7 +205,6 @@ static void mb_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj);
MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj);
CPUMBState *env = &cpu->env;
- static bool tcg_initialized;
cs->env_ptr = env;
@@ -215,11 +214,6 @@ static void mb_cpu_initfn(Object *obj)
/* Inbound IRQ and FIR lines */
qdev_init_gpio_in(DEVICE(cpu), microblaze_cpu_set_irq, 2);
#endif
-
- if (tcg_enabled() && !tcg_initialized) {
- tcg_initialized = true;
- mb_tcg_init();
- }
}
static const VMStateDescription vmstate_mb_cpu = {
@@ -289,6 +283,7 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_num_core_regs = 32 + 5;
cc->disas_set_info = mb_disas_set_info;
+ cc->tcg_initialize = mb_tcg_init;
}
static const TypeInfo mb_cpu_type_info = {
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index fecc61a1ec..e7b5597c46 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -53,7 +53,6 @@
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
static TCGv env_debug;
-static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
@@ -1666,7 +1665,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -1701,7 +1700,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
@@ -1763,7 +1762,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
npc = dc->jmp_pc;
}
- if (tb->cflags & CF_LAST_IO)
+ if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end();
/* Force an update if the per-tb cpu state has changed. */
if (dc->is_jmp == DISAS_NEXT
@@ -1855,9 +1854,6 @@ void mb_tcg_init(void)
{
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
-
env_debug = tcg_global_mem_new(cpu_env,
offsetof(CPUMBState, debug),
"debug0");
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index c15b894362..80812f3e08 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -150,10 +150,6 @@ static void mips_cpu_initfn(Object *obj)
cs->env_ptr = env;
env->cpu_model = mcc->cpu_def;
-
- if (tcg_enabled()) {
- mips_tcg_init();
- }
}
static char *mips_cpu_type_name(const char *cpu_model)
@@ -202,6 +198,9 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->vmsd = &vmstate_mips_cpu;
#endif
cc->disas_set_info = mips_cpu_disas_set_info;
+#ifdef CONFIG_TCG
+ cc->tcg_initialize = mips_tcg_init;
+#endif
cc->gdb_num_core_regs = 73;
cc->gdb_stop_before_watchpoint = true;
diff --git a/target/mips/translate.c b/target/mips/translate.c
index 7c96aff1a0..d0690f7df6 100644
--- a/target/mips/translate.c
+++ b/target/mips/translate.c
@@ -1376,7 +1376,6 @@ enum {
};
/* global register indices */
-static TCGv_env cpu_env;
static TCGv cpu_gpr[32], cpu_PC;
static TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC];
static TCGv cpu_dspctrl, btarget, bcond;
@@ -5327,11 +5326,11 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
switch (sel) {
case 0:
/* Mark as an IO operation because we read the time. */
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_mfc0_count(arg, cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
/* Break the TB to be able to take timer interrupts immediately
@@ -5734,7 +5733,7 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
if (sel != 0)
check_insn(ctx, ISA_MIPS32);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
@@ -6401,7 +6400,7 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
trace_mips_translate_c0("mtc0", rn, reg, sel);
/* For simplicity assume that all writes can cause interrupts. */
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
/* BS_STOP isn't sufficient, we need to ensure we break out of
* translated code to check for pending interrupts. */
@@ -6679,11 +6678,11 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
switch (sel) {
case 0:
/* Mark as an IO operation because we read the time. */
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_mfc0_count(arg, cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
/* Break the TB to be able to take timer interrupts immediately
@@ -7072,7 +7071,7 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
if (sel != 0)
check_insn(ctx, ISA_MIPS64);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
@@ -7727,7 +7726,7 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
trace_mips_translate_c0("dmtc0", rn, reg, sel);
/* For simplicity assume that all writes can cause interrupts. */
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
/* BS_STOP isn't sufficient, we need to ensure we break out of
* translated code to check for pending interrupts. */
@@ -10756,11 +10755,11 @@ static void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel)
gen_store_gpr(t0, rt);
break;
case 2:
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdhwr_cc(t0, cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
gen_store_gpr(t0, rt);
@@ -20248,7 +20247,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
ctx.default_tcg_memop_mask = (ctx.insn_flags & ISA_MIPS32R6) ?
MO_UNALN : MO_ALIGN;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -20274,7 +20273,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
goto done_generating;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
@@ -20335,7 +20334,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
if (singlestep)
break;
}
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
if (cs->singlestep_enabled && ctx.bstate != BS_BRANCH) {
@@ -20453,14 +20452,6 @@ void mips_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
void mips_tcg_init(void)
{
int i;
- static int inited;
-
- /* Initialize various static tables. */
- if (inited)
- return;
-
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
TCGV_UNUSED(cpu_gpr[0]);
for (i = 1; i < 32; i++)
@@ -20506,8 +20497,6 @@ void mips_tcg_init(void)
fpu_fcr31 = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMIPSState, active_fpu.fcr31),
"fcr31");
-
- inited = 1;
}
#include "translate_init.c"
diff --git a/target/moxie/cpu.c b/target/moxie/cpu.c
index 30bd44fcad..24ab3f3708 100644
--- a/target/moxie/cpu.c
+++ b/target/moxie/cpu.c
@@ -77,14 +77,8 @@ static void moxie_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
MoxieCPU *cpu = MOXIE_CPU(obj);
- static int inited;
cs->env_ptr = &cpu->env;
-
- if (tcg_enabled() && !inited) {
- inited = 1;
- moxie_translate_init();
- }
}
static ObjectClass *moxie_cpu_class_by_name(const char *cpu_model)
@@ -122,6 +116,7 @@ static void moxie_cpu_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_moxie_cpu;
#endif
cc->disas_set_info = moxie_cpu_disas_set_info;
+ cc->tcg_initialize = moxie_translate_init;
}
static void moxielite_initfn(Object *obj)
diff --git a/target/moxie/translate.c b/target/moxie/translate.c
index 3cfd232558..28b405f0e4 100644
--- a/target/moxie/translate.c
+++ b/target/moxie/translate.c
@@ -56,7 +56,6 @@ enum {
static TCGv cpu_pc;
static TCGv cpu_gregs[16];
-static TCGv_env cpu_env;
static TCGv cc_a, cc_b;
#include "exec/gen-icount.h"
@@ -94,7 +93,6 @@ void moxie_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
void moxie_translate_init(void)
{
int i;
- static int done_init;
static const char * const gregnames[16] = {
"$fp", "$sp", "$r0", "$r1",
"$r2", "$r3", "$r4", "$r5",
@@ -102,11 +100,6 @@ void moxie_translate_init(void)
"$r10", "$r11", "$r12", "$r13"
};
- if (done_init) {
- return;
- }
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
cpu_pc = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMoxieState, pc), "$pc");
for (i = 0; i < 16; i++)
@@ -118,8 +111,6 @@ void moxie_translate_init(void)
offsetof(CPUMoxieState, cc_a), "cc_a");
cc_b = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMoxieState, cc_b), "cc_b");
-
- done_init = 1;
}
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
@@ -838,7 +829,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
ctx.singlestep_enabled = 0;
ctx.bstate = BS_NONE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index 5b02fb67ea..4742e52c78 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -69,18 +69,12 @@ static void nios2_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj);
Nios2CPU *cpu = NIOS2_CPU(obj);
CPUNios2State *env = &cpu->env;
- static bool tcg_initialized;
cs->env_ptr = env;
#if !defined(CONFIG_USER_ONLY)
mmu_init(env);
#endif
-
- if (tcg_enabled() && !tcg_initialized) {
- tcg_initialized = true;
- nios2_tcg_init();
- }
}
static ObjectClass *nios2_cpu_class_by_name(const char *cpu_model)
@@ -215,6 +209,7 @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_read_register = nios2_cpu_gdb_read_register;
cc->gdb_write_register = nios2_cpu_gdb_write_register;
cc->gdb_num_core_regs = 49;
+ cc->tcg_initialize = nios2_tcg_init;
}
static const TypeInfo nios2_cpu_type_info = {
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index 0d2d03d2d0..72329002ac 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -789,7 +789,6 @@ static const char * const regnames[] = {
"rpc"
};
-static TCGv_ptr cpu_env;
static TCGv cpu_R[NUM_CORE_REGS];
#include "exec/gen-icount.h"
@@ -827,7 +826,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
max_insns = 1;
} else {
int page_insns = (TARGET_PAGE_SIZE - (tb->pc & TARGET_PAGE_MASK)) / 4;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -854,7 +853,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
break;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
@@ -871,7 +870,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
!tcg_op_buf_full() &&
num_insns < max_insns);
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
@@ -947,9 +946,6 @@ void nios2_tcg_init(void)
{
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
-
for (i = 0; i < NUM_CORE_REGS; i++) {
cpu_R[i] = tcg_global_mem_new(cpu_env,
offsetof(CPUNios2State, regs[i]),
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index a6d2049684..a8db869e50 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -86,18 +86,12 @@ static void openrisc_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
OpenRISCCPU *cpu = OPENRISC_CPU(obj);
- static int inited;
cs->env_ptr = &cpu->env;
#ifndef CONFIG_USER_ONLY
cpu_openrisc_mmu_init(cpu);
#endif
-
- if (tcg_enabled() && !inited) {
- inited = 1;
- openrisc_translate_init();
- }
}
/* CPU models */
@@ -169,6 +163,7 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
dc->vmsd = &vmstate_openrisc_cpu;
#endif
cc->gdb_num_core_regs = 32 + 3;
+ cc->tcg_initialize = openrisc_translate_init;
}
static void cpu_register(const OpenRISCCPUInfo *info)
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 99f2b463ce..2747b24cf0 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -53,7 +53,6 @@ typedef struct DisasContext {
bool singlestep_enabled;
} DisasContext;
-static TCGv_env cpu_env;
static TCGv cpu_sr;
static TCGv cpu_R[32];
static TCGv cpu_R0;
@@ -80,8 +79,6 @@ void openrisc_translate_init(void)
};
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
cpu_sr = tcg_global_mem_new(cpu_env,
offsetof(CPUOpenRISCState, sr), "sr");
cpu_dflag = tcg_global_mem_new_i32(cpu_env,
@@ -1546,7 +1543,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
@@ -1589,7 +1586,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
break;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
disas_openrisc_insn(dc, cpu);
@@ -1612,7 +1609,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
&& (dc->pc < next_page_start)
&& num_insns < max_insns);
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 469ebeb446..998fbed848 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -51,7 +51,6 @@
/* Code translation helpers */
/* global register indexes */
-static TCGv_env cpu_env;
static char cpu_reg_names[10*3 + 22*4 /* GPR */
    + 10*4 + 22*5 /* SPE GPRh */
    + 10*4 + 22*5 /* FPR */
@@ -84,13 +83,6 @@ void ppc_translate_init(void)
int i;
char* p;
size_t cpu_reg_names_size;
- static int done_init = 0;
-
- if (done_init)
- return;
-
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
p = cpu_reg_names;
cpu_reg_names_size = sizeof(cpu_reg_names);
@@ -191,8 +183,6 @@ void ppc_translate_init(void)
cpu_access_type = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUPPCState, access_type), "access_type");
-
- done_init = 1;
}
/* internal defines */
@@ -902,7 +892,7 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
gen_set_Rc0(ctx, t0);
}
- if (!TCGV_EQUAL(t0, ret)) {
+ if (t0 != ret) {
tcg_gen_mov_tl(ret, t0);
tcg_temp_free(t0);
}
@@ -1438,7 +1428,7 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
gen_set_Rc0(ctx, t0);
}
- if (!TCGV_EQUAL(t0, ret)) {
+ if (t0 != ret) {
tcg_gen_mov_tl(ret, t0);
tcg_temp_free(t0);
}
@@ -7279,7 +7269,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
msr_se = 1;
#endif
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -7307,7 +7297,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
LOG_DISAS("----------------\n");
LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
ctx.nip, ctx.mem_idx, (int)msr_ir);
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO))
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO))
gen_io_start();
if (unlikely(need_byteswap(&ctx))) {
ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip));
@@ -7388,7 +7378,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
exit(1);
}
}
- if (tb->cflags & CF_LAST_IO)
+ if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end();
if (ctx.exception == POWERPC_EXCP_NONE) {
gen_goto_tb(&ctx, 0, ctx.nip);
diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c
index 41f46193a1..b9c49c22f2 100644
--- a/target/ppc/translate_init.c
+++ b/target/ppc/translate_init.c
@@ -177,11 +177,11 @@ static void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
#if !defined(CONFIG_USER_ONLY)
static void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -189,11 +189,11 @@ static void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
static void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -204,11 +204,11 @@ static void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
/* Time base */
static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -216,11 +216,11 @@ static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -241,11 +241,11 @@ static void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
#if !defined(CONFIG_USER_ONLY)
static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -253,11 +253,11 @@ static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -285,11 +285,11 @@ static void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
/* HDECR */
static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -297,11 +297,11 @@ static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -10500,10 +10500,6 @@ static void ppc_cpu_initfn(Object *obj)
env->sps = (env->mmu_model & POWERPC_MMU_64K) ? defsps_64k : defsps_4k;
}
#endif /* defined(TARGET_PPC64) */
-
- if (tcg_enabled()) {
- ppc_translate_init();
- }
}
static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr)
@@ -10608,8 +10604,11 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
#ifndef CONFIG_USER_ONLY
cc->virtio_is_big_endian = ppc_cpu_is_big_endian;
#endif
+#ifdef CONFIG_TCG
+ cc->tcg_initialize = ppc_translate_init;
+#endif
cc->disas_set_info = ppc_disas_set_info;
-
+
dc->fw_name = "PowerPC,UNKNOWN";
}
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index 95f4283188..ae3cee91a2 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -241,7 +241,6 @@ static void s390_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj);
S390CPU *cpu = S390_CPU(obj);
CPUS390XState *env = &cpu->env;
- static bool inited;
#if !defined(CONFIG_USER_ONLY)
struct tm tm;
#endif
@@ -259,11 +258,6 @@ static void s390_cpu_initfn(Object *obj)
env->cpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu);
s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
#endif
-
- if (tcg_enabled() && !inited) {
- inited = true;
- s390x_translate_init();
- }
}
static void s390_cpu_finalize(Object *obj)
@@ -503,6 +497,9 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
#endif
#endif
cc->disas_set_info = s390_cpu_disas_set_info;
+#ifdef CONFIG_TCG
+ cc->tcg_initialize = s390x_translate_init;
+#endif
cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
cc->gdb_core_xml_file = "s390x-core64.xml";
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
index 81c5727168..9459b73c73 100644
--- a/target/s390x/helper.h
+++ b/target/s390x/helper.h
@@ -34,7 +34,9 @@ DEF_HELPER_3(celgb, i64, env, i64, i32)
DEF_HELPER_3(cdlgb, i64, env, i64, i32)
DEF_HELPER_3(cxlgb, i64, env, i64, i32)
DEF_HELPER_4(cdsg, void, env, i64, i32, i32)
+DEF_HELPER_4(cdsg_parallel, void, env, i64, i32, i32)
DEF_HELPER_4(csst, i32, env, i32, i64, i64)
+DEF_HELPER_4(csst_parallel, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_3(aeb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(adb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_5(axb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
@@ -106,7 +108,9 @@ DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_2(stfle, i32, env, i64)
DEF_HELPER_FLAGS_2(lpq, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(lpq_parallel, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_4(stpq, TCG_CALL_NO_WG, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(stpq_parallel, TCG_CALL_NO_WG, void, env, i64, i64, i64)
DEF_HELPER_4(mvcos, i32, env, i64, i64, i64)
DEF_HELPER_4(cu12, i32, env, i32, i32, i32)
DEF_HELPER_4(cu14, i32, env, i32, i32, i32)
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index 69a16867d4..a1652d4849 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -1361,8 +1361,8 @@ uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
return cc;
}
-void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
- uint32_t r1, uint32_t r3)
+static void do_cdsg(CPUS390XState *env, uint64_t addr,
+ uint32_t r1, uint32_t r3, bool parallel)
{
uintptr_t ra = GETPC();
Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
@@ -1370,7 +1370,7 @@ void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
Int128 oldv;
bool fail;
- if (parallel_cpus) {
+ if (parallel) {
#ifndef CONFIG_ATOMIC128
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
@@ -1402,7 +1402,20 @@ void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
env->regs[r1 + 1] = int128_getlo(oldv);
}
-uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
+void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
+ uint32_t r1, uint32_t r3)
+{
+ do_cdsg(env, addr, r1, r3, false);
+}
+
+void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
+ uint32_t r1, uint32_t r3)
+{
+ do_cdsg(env, addr, r1, r3, true);
+}
+
+static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
+ uint64_t a2, bool parallel)
{
#if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128)
uint32_t mem_idx = cpu_mmu_index(env, false);
@@ -1438,7 +1451,7 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
the complete operation is not. Therefore we do not need to assert serial
context in order to implement this. That said, restart early if we can't
support either operation that is supposed to be atomic. */
- if (parallel_cpus) {
+ if (parallel) {
int mask = 0;
#if !defined(CONFIG_ATOMIC64)
mask = -8;
@@ -1462,7 +1475,7 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
uint32_t cv = env->regs[r3];
uint32_t ov;
- if (parallel_cpus) {
+ if (parallel) {
#ifdef CONFIG_USER_ONLY
uint32_t *haddr = g2h(a1);
ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
@@ -1485,7 +1498,7 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
uint64_t cv = env->regs[r3];
uint64_t ov;
- if (parallel_cpus) {
+ if (parallel) {
#ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY
uint64_t *haddr = g2h(a1);
@@ -1495,7 +1508,7 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
# endif
#else
- /* Note that we asserted !parallel_cpus above. */
+ /* Note that we asserted !parallel above. */
g_assert_not_reached();
#endif
} else {
@@ -1515,13 +1528,13 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
Int128 ov;
- if (parallel_cpus) {
+ if (parallel) {
#ifdef CONFIG_ATOMIC128
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
cc = !int128_eq(ov, cv);
#else
- /* Note that we asserted !parallel_cpus above. */
+ /* Note that we asserted !parallel above. */
g_assert_not_reached();
#endif
} else {
@@ -1565,13 +1578,13 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
cpu_stq_data_ra(env, a2, svh, ra);
break;
case 4:
- if (parallel_cpus) {
+ if (parallel) {
#ifdef CONFIG_ATOMIC128
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
Int128 sv = int128_make128(svl, svh);
helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
#else
- /* Note that we asserted !parallel_cpus above. */
+ /* Note that we asserted !parallel above. */
g_assert_not_reached();
#endif
} else {
@@ -1592,6 +1605,17 @@ uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
g_assert_not_reached();
}
+uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
+{
+ return do_csst(env, r3, a1, a2, false);
+}
+
+uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
+ uint64_t a2)
+{
+ return do_csst(env, r3, a1, a2, true);
+}
+
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
@@ -2011,12 +2035,12 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
#endif
/* load pair from quadword */
-uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
+static uint64_t do_lpq(CPUS390XState *env, uint64_t addr, bool parallel)
{
uintptr_t ra = GETPC();
uint64_t hi, lo;
- if (parallel_cpus) {
+ if (parallel) {
#ifndef CONFIG_ATOMIC128
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
@@ -2037,13 +2061,23 @@ uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
return hi;
}
+uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
+{
+ return do_lpq(env, addr, false);
+}
+
+uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
+{
+ return do_lpq(env, addr, true);
+}
+
/* store pair to quadword */
-void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
- uint64_t low, uint64_t high)
+static void do_stpq(CPUS390XState *env, uint64_t addr,
+ uint64_t low, uint64_t high, bool parallel)
{
uintptr_t ra = GETPC();
- if (parallel_cpus) {
+ if (parallel) {
#ifndef CONFIG_ATOMIC128
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
@@ -2061,6 +2095,18 @@ void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
}
}
+void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
+ uint64_t low, uint64_t high)
+{
+ do_stpq(env, addr, low, high, false);
+}
+
+void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
+ uint64_t low, uint64_t high)
+{
+ do_stpq(env, addr, low, high, true);
+}
+
/* Execute instruction. This instruction executes an insn modified with
the contents of r1. It does not change the executed instruction in memory;
it does not change the program counter.
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index 1ccdb35df2..dee72a787d 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -37,10 +37,6 @@
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
-
-/* global register indexes */
-static TCGv_env cpu_env;
-
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
@@ -112,8 +108,6 @@ void s390x_translate_init(void)
{
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
psw_addr = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUS390XState, psw.addr),
"psw_addr");
@@ -554,7 +548,7 @@ static void gen_op_calc_cc(DisasContext *s)
static bool use_exit_tb(DisasContext *s)
{
return (s->singlestep_enabled ||
- (s->tb->cflags & CF_LAST_IO) ||
+ (tb_cflags(s->tb) & CF_LAST_IO) ||
(s->tb->flags & FLAG_MASK_PER));
}
@@ -1966,7 +1960,11 @@ static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
addr = get_address(s, 0, b2, d2);
t_r1 = tcg_const_i32(r1);
t_r3 = tcg_const_i32(r3);
- gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
+ if (tb_cflags(s->tb) & CF_PARALLEL) {
+ gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
+ } else {
+ gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
+ }
tcg_temp_free_i64(addr);
tcg_temp_free_i32(t_r1);
tcg_temp_free_i32(t_r3);
@@ -1980,7 +1978,11 @@ static ExitStatus op_csst(DisasContext *s, DisasOps *o)
int r3 = get_field(s->fields, r3);
TCGv_i32 t_r3 = tcg_const_i32(r3);
- gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
+ if (tb_cflags(s->tb) & CF_PARALLEL) {
+ gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->in1, o->in2);
+ } else {
+ gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
+ }
tcg_temp_free_i32(t_r3);
set_cc_static(s);
@@ -2939,7 +2941,7 @@ static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
TCGMemOp mop = s->insn->data;
/* In a parallel context, stop the world and single step. */
- if (parallel_cpus) {
+ if (tb_cflags(s->tb) & CF_PARALLEL) {
potential_page_fault(s);
gen_exception(EXCP_ATOMIC);
return EXIT_NORETURN;
@@ -2960,7 +2962,11 @@ static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
{
- gen_helper_lpq(o->out, cpu_env, o->in2);
+ if (tb_cflags(s->tb) & CF_PARALLEL) {
+ gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
+ } else {
+ gen_helper_lpq(o->out, cpu_env, o->in2);
+ }
return_low128(o->out2);
return NO_EXIT;
}
@@ -4281,7 +4287,11 @@ static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
{
- gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
+ if (tb_cflags(s->tb) & CF_PARALLEL) {
+ gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
+ } else {
+ gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
+ }
return NO_EXIT;
}
@@ -5883,7 +5893,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -5908,7 +5918,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
break;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
@@ -5927,7 +5937,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
}
} while (status == NO_EXIT);
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index 252440e019..89abce2472 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -258,10 +258,6 @@ static void superh_cpu_initfn(Object *obj)
cs->env_ptr = env;
env->movcal_backup_tail = &(env->movcal_backup);
-
- if (tcg_enabled()) {
- sh4_translate_init();
- }
}
static const VMStateDescription vmstate_sh_cpu = {
@@ -297,6 +293,7 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
#endif
cc->disas_set_info = superh_cpu_disas_set_info;
+ cc->tcg_initialize = sh4_translate_init;
cc->gdb_num_core_regs = 59;
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 27067cbd30..703020fe87 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -65,7 +65,6 @@ enum {
};
/* global register indexes */
-static TCGv_env cpu_env;
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
@@ -81,7 +80,6 @@ static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
void sh4_translate_init(void)
{
int i;
- static int done_init = 0;
static const char * const gregnames[24] = {
"R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
"R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
@@ -100,13 +98,6 @@ void sh4_translate_init(void)
"FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
};
- if (done_init) {
- return;
- }
-
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
-
for (i = 0; i < 24; i++) {
cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUSH4State, gregs[i]),
@@ -163,8 +154,6 @@ void sh4_translate_init(void)
cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUSH4State, fregs[i]),
fregnames[i]);
-
- done_init = 1;
}
void superh_cpu_dump_state(CPUState *cs, FILE *f,
@@ -528,7 +517,7 @@ static void _decode_opc(DisasContext * ctx)
/* Detect the start of a gUSA region. If so, update envflags
and end the TB. This will allow us to see the end of the
region (stored in R0) in the next TB. */
- if (B11_8 == 15 && B7_0s < 0 && parallel_cpus) {
+ if (B11_8 == 15 && B7_0s < 0 && (tb_cflags(ctx->tb) & CF_PARALLEL)) {
ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
ctx->bstate = BS_STOP;
}
@@ -2255,7 +2244,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
(ctx.tbflags & (1 << SR_RB))) * 0x10;
ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -2299,7 +2288,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
break;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
@@ -2307,7 +2296,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
decode_opc(&ctx);
ctx.pc += 2;
}
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index beab90f3e6..47d0927707 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -784,10 +784,6 @@ static void sparc_cpu_initfn(Object *obj)
cs->env_ptr = env;
- if (tcg_enabled()) {
- gen_intermediate_code_init(env);
- }
-
if (scc->cpu_def) {
env->def = *scc->cpu_def;
}
@@ -891,6 +887,7 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_sparc_cpu;
#endif
cc->disas_set_info = cpu_sparc_disas_set_info;
+ cc->tcg_initialize = sparc_tcg_init;
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
cc->gdb_num_core_regs = 86;
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index 1598f65927..bf2b8931cc 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -594,7 +594,7 @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
/* translate.c */
-void gen_intermediate_code_init(CPUSPARCState *env);
+void sparc_tcg_init(void);
/* cpu-exec.c */
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index e89b6227f2..849a02aebd 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -41,7 +41,6 @@
according to jump_pc[T2] */
/* global register indexes */
-static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
@@ -171,18 +170,13 @@ static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
return TCGV_HIGH(cpu_fpr[src / 2]);
}
#else
+ TCGv_i32 ret = get_temp_i32(dc);
if (src & 1) {
- return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
+ tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
} else {
- TCGv_i32 ret = get_temp_i32(dc);
- TCGv_i64 t = tcg_temp_new_i64();
-
- tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
- tcg_gen_extrl_i64_i32(ret, t);
- tcg_temp_free_i64(t);
-
- return ret;
+ tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
}
+ return ret;
#endif
}
@@ -195,7 +189,7 @@ static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
}
#else
- TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
+ TCGv_i64 t = (TCGv_i64)v;
tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
(dst & 1 ? 0 : 32), 32);
#endif
@@ -2442,7 +2436,7 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
default:
/* ??? In theory, this should be raise DAE_invalid_asi.
But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
- if (parallel_cpus) {
+ if (tb_cflags(dc->tb) & CF_PARALLEL) {
gen_helper_exit_atomic(cpu_env);
} else {
TCGv_i32 r_asi = tcg_const_i32(da.asi);
@@ -5772,7 +5766,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock * tb)
#endif
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -5801,7 +5795,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock * tb)
goto exit_gen_loop;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
@@ -5828,7 +5822,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock * tb)
num_insns < max_insns);
exit_gen_loop:
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
if (!dc->is_br) {
@@ -5862,9 +5856,8 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock * tb)
#endif
}
-void gen_intermediate_code_init(CPUSPARCState *env)
+void sparc_tcg_init(void)
{
- static int inited;
static const char gregnames[32][4] = {
"g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
"o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
@@ -5917,15 +5910,6 @@ void gen_intermediate_code_init(CPUSPARCState *env)
unsigned int i;
- /* init various static tables */
- if (inited) {
- return;
- }
- inited = 1;
-
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
-
cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
offsetof(CPUSPARCState, regwptr),
"regwptr");
diff --git a/target/tilegx/cpu.c b/target/tilegx/cpu.c
index 7345f5a8b5..2ef8ea7daa 100644
--- a/target/tilegx/cpu.c
+++ b/target/tilegx/cpu.c
@@ -103,14 +103,8 @@ static void tilegx_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj);
TileGXCPU *cpu = TILEGX_CPU(obj);
CPUTLGState *env = &cpu->env;
- static bool tcg_initialized;
cs->env_ptr = env;
-
- if (tcg_enabled() && !tcg_initialized) {
- tcg_initialized = true;
- tilegx_tcg_init();
- }
}
static void tilegx_cpu_do_interrupt(CPUState *cs)
@@ -161,6 +155,7 @@ static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = tilegx_cpu_set_pc;
cc->handle_mmu_fault = tilegx_cpu_handle_mmu_fault;
cc->gdb_num_core_regs = 0;
+ cc->tcg_initialize = tilegx_tcg_init;
}
static const TypeInfo tilegx_cpu_type_info = {
diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c
index ace2830a84..d55549dabc 100644
--- a/target/tilegx/translate.c
+++ b/target/tilegx/translate.c
@@ -33,7 +33,6 @@
#define FMT64X "%016" PRIx64
-static TCGv_env cpu_env;
static TCGv cpu_pc;
static TCGv cpu_regs[TILEGX_R_COUNT];
@@ -2378,7 +2377,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
uint64_t pc_start = tb->pc;
uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
int num_insns = 0;
- int max_insns = tb->cflags & CF_COUNT_MASK;
+ int max_insns = tb_cflags(tb) & CF_COUNT_MASK;
dc->pc = pc_start;
dc->mmuidx = 0;
@@ -2445,8 +2444,6 @@ void tilegx_tcg_init(void)
{
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
cpu_pc = tcg_global_mem_new_i64(cpu_env, offsetof(CPUTLGState, pc), "pc");
for (i = 0; i < TILEGX_R_COUNT; i++) {
cpu_regs[i] = tcg_global_mem_new_i64(cpu_env,
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index 871eb35453..cd93806d47 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -109,10 +109,6 @@ static void tricore_cpu_initfn(Object *obj)
CPUTriCoreState *env = &cpu->env;
cs->env_ptr = env;
-
- if (tcg_enabled()) {
- tricore_tcg_init();
- }
}
static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model)
@@ -182,6 +178,7 @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
cc->set_pc = tricore_cpu_set_pc;
cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb;
cc->get_phys_page_attrs_debug = tricore_cpu_get_phys_page_attrs_debug;
+ cc->tcg_initialize = tricore_tcg_init;
}
static void cpu_register(const TriCoreCPUInfo *info)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index e807500e26..4e5b083665 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -47,8 +47,6 @@ static TCGv cpu_PSW_V;
static TCGv cpu_PSW_SV;
static TCGv cpu_PSW_AV;
static TCGv cpu_PSW_SAV;
-/* CPU env */
-static TCGv_env cpu_env;
#include "exec/gen-icount.h"
@@ -8790,7 +8788,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
int num_insns, max_insns;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -8880,12 +8878,7 @@ static void tricore_tcg_init_csfr(void)
void tricore_tcg_init(void)
{
int i;
- static int inited;
- if (inited) {
- return;
- }
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
+
/* reg init */
for (i = 0 ; i < 16 ; i++) {
cpu_gpr_a[i] = tcg_global_mem_new(cpu_env,
diff --git a/target/unicore32/cpu.c b/target/unicore32/cpu.c
index 138acc9dd8..526604ff78 100644
--- a/target/unicore32/cpu.c
+++ b/target/unicore32/cpu.c
@@ -117,7 +117,6 @@ static void uc32_cpu_initfn(Object *obj)
CPUState *cs = CPU(obj);
UniCore32CPU *cpu = UNICORE32_CPU(obj);
CPUUniCore32State *env = &cpu->env;
- static bool inited;
cs->env_ptr = env;
@@ -130,11 +129,6 @@ static void uc32_cpu_initfn(Object *obj)
#endif
tlb_flush(cs);
-
- if (tcg_enabled() && !inited) {
- inited = true;
- uc32_translate_init();
- }
}
static const VMStateDescription vmstate_uc32_cpu = {
@@ -162,6 +156,7 @@ static void uc32_cpu_class_init(ObjectClass *oc, void *data)
#else
cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug;
#endif
+ cc->tcg_initialize = uc32_translate_init;
dc->vmsd = &vmstate_uc32_cpu;
}
diff --git a/target/unicore32/translate.c b/target/unicore32/translate.c
index f9aa248a80..384aa86027 100644
--- a/target/unicore32/translate.c
+++ b/target/unicore32/translate.c
@@ -54,7 +54,6 @@ typedef struct DisasContext {
conditional executions state has been updated. */
#define DISAS_SYSCALL DISAS_TARGET_3
-static TCGv_env cpu_env;
static TCGv_i32 cpu_R[32];
/* FIXME: These should be removed. */
@@ -74,9 +73,6 @@ void uc32_translate_init(void)
{
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
-
for (i = 0; i < 32; i++) {
cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUUniCore32State, regs[i]), regnames[i]);
@@ -1900,7 +1896,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
cpu_F1d = tcg_temp_new_i64();
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
@@ -1933,7 +1929,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
goto done_generating;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
@@ -1958,7 +1954,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
dc->pc < next_page_start &&
num_insns < max_insns);
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
if (dc->condjmp) {
/* FIXME: This can theoretically happen with self-modifying
code. */
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index dcdc765a86..a5651e5dab 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -121,7 +121,6 @@ static void xtensa_cpu_initfn(Object *obj)
XtensaCPU *cpu = XTENSA_CPU(obj);
XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(obj);
CPUXtensaState *env = &cpu->env;
- static bool tcg_inited;
cs->env_ptr = env;
env->config = xcc->config;
@@ -131,11 +130,6 @@ static void xtensa_cpu_initfn(Object *obj)
memory_region_init_io(env->system_er, NULL, NULL, env, "er",
UINT64_C(0x100000000));
address_space_init(env->address_space_er, env->system_er, "ER");
-
- if (tcg_enabled() && !tcg_inited) {
- tcg_inited = true;
- xtensa_translate_init();
- }
}
static const VMStateDescription vmstate_xtensa_cpu = {
@@ -170,6 +164,7 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
cc->do_unassigned_access = xtensa_cpu_do_unassigned_access;
#endif
cc->debug_excp_handler = xtensa_breakpoint_handler;
+ cc->tcg_initialize = xtensa_translate_init;
dc->vmsd = &vmstate_xtensa_cpu;
}
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 03719ce12b..20f7ddf042 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -77,7 +77,6 @@ typedef struct DisasContext {
unsigned cpenable;
} DisasContext;
-static TCGv_env cpu_env;
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_FR[16];
@@ -221,8 +220,6 @@ void xtensa_translate_init(void)
};
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
cpu_pc = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUXtensaState, pc), "pc");
@@ -517,12 +514,12 @@ static bool gen_check_sr(DisasContext *dc, uint32_t sr, unsigned access)
static bool gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_update_ccount(cpu_env);
tcg_gen_mov_i32(d, cpu_SR[sr]);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
return true;
}
@@ -702,11 +699,11 @@ static bool gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
static void gen_check_interrupts(DisasContext *dc)
{
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_check_interrupts(cpu_env);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
}
@@ -760,11 +757,11 @@ static bool gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
static bool gen_wsr_ccount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_wsr_ccount(cpu_env, v);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jumpi_check_loop_end(dc, 0);
return true;
@@ -801,11 +798,11 @@ static bool gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
tcg_gen_mov_i32(cpu_SR[sr], v);
tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_update_ccompare(cpu_env, tmp);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jumpi_check_loop_end(dc, 0);
ret = true;
@@ -900,11 +897,11 @@ static void gen_waiti(DisasContext *dc, uint32_t imm4)
TCGv_i32 pc = tcg_const_i32(dc->next_pc);
TCGv_i32 intlevel = tcg_const_i32(imm4);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_waiti(cpu_env, pc, intlevel);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
tcg_temp_free(pc);
@@ -3126,7 +3123,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
CPUXtensaState *env = cs->env_ptr;
DisasContext dc;
int insn_count = 0;
- int max_insns = tb->cflags & CF_COUNT_MASK;
+ int max_insns = tb_cflags(tb) & CF_COUNT_MASK;
uint32_t pc_start = tb->pc;
uint32_t next_page_start =
(pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
@@ -3162,7 +3159,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
gen_tb_start(tb);
- if ((tb->cflags & CF_USE_ICOUNT) &&
+ if ((tb_cflags(tb) & CF_USE_ICOUNT) &&
(tb->flags & XTENSA_TBFLAG_YIELD)) {
tcg_gen_insn_start(dc.pc);
++insn_count;
@@ -3194,7 +3191,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
break;
}
- if (insn_count == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (insn_count == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
@@ -3235,7 +3232,7 @@ done:
tcg_temp_free(dc.next_icount);
}
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}