Diffstat (limited to 'target-arm')
-rw-r--r--  target-arm/Makefile.objs    |   1
-rw-r--r--  target-arm/cpu-qom.h        |  19
-rw-r--r--  target-arm/cpu.c            |  23
-rw-r--r--  target-arm/cpu.h            | 170
-rw-r--r--  target-arm/cpu64.c          | 118
-rw-r--r--  target-arm/gdbstub64.c      |  73
-rw-r--r--  target-arm/helper.c         |  36
-rw-r--r--  target-arm/machine.c        |   8
-rw-r--r--  target-arm/translate-a64.c  | 139
-rw-r--r--  target-arm/translate.c      | 450
-rw-r--r--  target-arm/translate.h      |  49
11 files changed, 856 insertions, 230 deletions
diff --git a/target-arm/Makefile.objs b/target-arm/Makefile.objs
index 2d9f77fa9b..6453f5c011 100644
--- a/target-arm/Makefile.objs
+++ b/target-arm/Makefile.objs
@@ -5,3 +5,4 @@ obj-$(CONFIG_NO_KVM) += kvm-stub.o
obj-y += translate.o op_helper.o helper.o cpu.o
obj-y += neon_helper.o iwmmxt_helper.o
obj-y += gdbstub.o
+obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o gdbstub64.o
diff --git a/target-arm/cpu-qom.h b/target-arm/cpu-qom.h
index 9f47baebf8..b55306a3c3 100644
--- a/target-arm/cpu-qom.h
+++ b/target-arm/cpu-qom.h
@@ -130,6 +130,18 @@ typedef struct ARMCPU {
uint32_t reset_auxcr;
} ARMCPU;
+#define TYPE_AARCH64_CPU "aarch64-cpu"
+#define AARCH64_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(AArch64CPUClass, (klass), TYPE_AARCH64_CPU)
+#define AARCH64_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(AArch64CPUClass, (obj), TYPE_AARCH64_CPU)
+
+typedef struct AArch64CPUClass {
+ /*< private >*/
+ ARMCPUClass parent_class;
+ /*< public >*/
+} AArch64CPUClass;
+
static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
{
return container_of(env, ARMCPU, env);
@@ -161,4 +173,11 @@ int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void arm_gt_ptimer_cb(void *opaque);
void arm_gt_vtimer_cb(void *opaque);
+#ifdef TARGET_AARCH64
+void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+#endif
+
#endif
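
Minimal usage sketch of the new QOM cast macros added above; not part of the patch, and the function name is purely illustrative (assumes cpu-qom.h is included):

static void aarch64_class_example(ObjectClass *oc, Object *obj)
{
    AArch64CPUClass *acc = AARCH64_CPU_CLASS(oc);       /* checked cast of a class */
    AArch64CPUClass *agc = AARCH64_CPU_GET_CLASS(obj);  /* class of an instance */
    (void)acc;
    (void)agc;
}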
diff --git a/target-arm/cpu.c b/target-arm/cpu.c
index b2556c66b4..d40f2a7a4f 100644
--- a/target-arm/cpu.c
+++ b/target-arm/cpu.c
@@ -84,6 +84,11 @@ static void arm_cpu_reset(CPUState *s)
env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
}
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ /* 64 bit CPUs always start in 64 bit mode */
+ env->aarch64 = 1;
+ }
+
#if defined(CONFIG_USER_ONLY)
env->uncached_cpsr = ARM_CPU_MODE_USR;
/* For user mode we must enable access to coprocessors */
@@ -108,7 +113,7 @@ static void arm_cpu_reset(CPUState *s)
modified flash and reset itself. However images
loaded via -kernel have not been copied yet, so load the
values directly from there. */
- env->regs[13] = ldl_p(rom);
+ env->regs[13] = ldl_p(rom) & 0xFFFFFFFC;
pc = ldl_p(rom + 4);
env->thumb = pc & 1;
env->regs[15] = pc & ~1;
@@ -288,8 +293,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
acc->parent_realize(dev, errp);
}
-/* CPU models */
-
static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
{
ObjectClass *oc;
@@ -309,6 +312,9 @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
return oc;
}
+/* CPU models. These are not needed for the AArch64 linux-user build. */
+#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
+
static void arm926_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -822,6 +828,7 @@ static void pxa270c5_initfn(Object *obj)
cpu->reset_sctlr = 0x00000078;
}
+#ifdef CONFIG_USER_ONLY
static void arm_any_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -832,8 +839,14 @@ static void arm_any_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
set_feature(&cpu->env, ARM_FEATURE_V7MP);
+#ifdef TARGET_AARCH64
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+#endif
cpu->midr = 0xffffffff;
}
+#endif
+
+#endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */
typedef struct ARMCPUInfo {
const char *name;
@@ -842,6 +855,7 @@ typedef struct ARMCPUInfo {
} ARMCPUInfo;
static const ARMCPUInfo arm_cpus[] = {
+#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
{ .name = "arm926", .initfn = arm926_initfn },
{ .name = "arm946", .initfn = arm946_initfn },
{ .name = "arm1026", .initfn = arm1026_initfn },
@@ -874,7 +888,10 @@ static const ARMCPUInfo arm_cpus[] = {
{ .name = "pxa270-b1", .initfn = pxa270b1_initfn },
{ .name = "pxa270-c0", .initfn = pxa270c0_initfn },
{ .name = "pxa270-c5", .initfn = pxa270c5_initfn },
+#ifdef CONFIG_USER_ONLY
{ .name = "any", .initfn = arm_any_initfn },
+#endif
+#endif
};
static void arm_cpu_class_init(ObjectClass *oc, void *data)
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index f2abdf37ce..2c56740bf6 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -19,13 +19,19 @@
#ifndef CPU_ARM_H
#define CPU_ARM_H
-#define TARGET_LONG_BITS 32
+#include "config.h"
-#define ELF_MACHINE EM_ARM
+#if defined(TARGET_AARCH64)
+ /* AArch64 definitions */
+# define TARGET_LONG_BITS 64
+# define ELF_MACHINE EM_AARCH64
+#else
+# define TARGET_LONG_BITS 32
+# define ELF_MACHINE EM_ARM
+#endif
#define CPUArchState struct CPUARMState
-#include "config.h"
#include "qemu-common.h"
#include "exec/cpu-defs.h"
@@ -97,6 +103,20 @@ typedef struct ARMGenericTimer {
typedef struct CPUARMState {
/* Regs for current mode. */
uint32_t regs[16];
+
+ /* 32/64 switch only happens when taking and returning from
+ * exceptions so the overlap semantics are taken care of then
+ * instead of having a complicated union.
+ */
+ /* Regs for A64 mode. */
+ uint64_t xregs[32];
+ uint64_t pc;
+ /* TODO: pstate doesn't correspond to an architectural register;
+ * it would be better modelled as the underlying fields.
+ */
+ uint32_t pstate;
+ uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */
+
/* Frequently accessed CPSR bits are stored separately for efficiency.
This contains all the other bits. Use cpsr_{read,write} to access
the whole CPSR. */
@@ -175,6 +195,11 @@ typedef struct CPUARMState {
uint32_t c15_power_control; /* power control */
} cp15;
+ /* System registers (AArch64) */
+ struct {
+ uint64_t tpidr_el0;
+ } sr;
+
struct {
uint32_t other_sp;
uint32_t vecbase;
@@ -191,7 +216,22 @@ typedef struct CPUARMState {
/* VFP coprocessor state. */
struct {
- float64 regs[32];
+ /* VFP/Neon register state. Note that the mapping between S, D and Q
+ * views of the register bank differs between AArch64 and AArch32:
+ * In AArch32:
+ * Qn = regs[2n+1]:regs[2n]
+ * Dn = regs[n]
+ * Sn = regs[n/2] bits 31..0 for even n, and bits 63..32 for odd n
+ * (and regs[32] to regs[63] are inaccessible)
+ * In AArch64:
+ * Qn = regs[2n+1]:regs[2n]
+ * Dn = regs[2n]
+ * Sn = regs[2n] bits 31..0
+ * This corresponds to the architecturally defined mapping between
+ * the two execution states, and means we do not need to explicitly
+ * map these registers when changing states.
+ */
+ float64 regs[64];
uint32_t xregs[16];
/* We store these fpcsr fields separately for convenience. */
@@ -261,6 +301,20 @@ int bank_number(int mode);
void switch_mode(CPUARMState *, int);
uint32_t do_arm_semihosting(CPUARMState *env);
+static inline bool is_a64(CPUARMState *env)
+{
+ return env->aarch64;
+}
+
+#define PSTATE_N_SHIFT 3
+#define PSTATE_N (1 << PSTATE_N_SHIFT)
+#define PSTATE_Z_SHIFT 2
+#define PSTATE_Z (1 << PSTATE_Z_SHIFT)
+#define PSTATE_C_SHIFT 1
+#define PSTATE_C (1 << PSTATE_C_SHIFT)
+#define PSTATE_V_SHIFT 0
+#define PSTATE_V (1 << PSTATE_V_SHIFT)
+
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
@@ -270,22 +324,22 @@ int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
int mmu_idx);
#define cpu_handle_mmu_fault cpu_arm_handle_mmu_fault
-#define CPSR_M (0x1f)
-#define CPSR_T (1 << 5)
-#define CPSR_F (1 << 6)
-#define CPSR_I (1 << 7)
-#define CPSR_A (1 << 8)
-#define CPSR_E (1 << 9)
-#define CPSR_IT_2_7 (0xfc00)
-#define CPSR_GE (0xf << 16)
-#define CPSR_RESERVED (0xf << 20)
-#define CPSR_J (1 << 24)
-#define CPSR_IT_0_1 (3 << 25)
-#define CPSR_Q (1 << 27)
-#define CPSR_V (1 << 28)
-#define CPSR_C (1 << 29)
-#define CPSR_Z (1 << 30)
-#define CPSR_N (1 << 31)
+#define CPSR_M (0x1fU)
+#define CPSR_T (1U << 5)
+#define CPSR_F (1U << 6)
+#define CPSR_I (1U << 7)
+#define CPSR_A (1U << 8)
+#define CPSR_E (1U << 9)
+#define CPSR_IT_2_7 (0xfc00U)
+#define CPSR_GE (0xfU << 16)
+#define CPSR_RESERVED (0xfU << 20)
+#define CPSR_J (1U << 24)
+#define CPSR_IT_0_1 (3U << 25)
+#define CPSR_Q (1U << 27)
+#define CPSR_V (1U << 28)
+#define CPSR_C (1U << 29)
+#define CPSR_Z (1U << 30)
+#define CPSR_N (1U << 31)
#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
@@ -409,6 +463,7 @@ enum arm_features {
ARM_FEATURE_PXN, /* has Privileged Execute Never bit */
ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
ARM_FEATURE_V8,
+ ARM_FEATURE_AARCH64, /* supports 64 bit mode */
};
static inline int arm_feature(CPUARMState *env, int feature)
@@ -729,8 +784,13 @@ bool write_cpustate_to_list(ARMCPU *cpu);
#define TARGET_PAGE_BITS 10
#endif
-#define TARGET_PHYS_ADDR_SPACE_BITS 40
-#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#if defined(TARGET_AARCH64)
+# define TARGET_PHYS_ADDR_SPACE_BITS 48
+# define TARGET_VIRT_ADDR_SPACE_BITS 64
+#else
+# define TARGET_PHYS_ADDR_SPACE_BITS 40
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
static inline CPUARMState *cpu_init(const char *cpu_model)
{
@@ -757,7 +817,13 @@ static inline int cpu_mmu_index (CPUARMState *env)
#include "exec/cpu-all.h"
-/* Bit usage in the TB flags field: */
+/* Bit usage in the TB flags field: bit 31 indicates whether we are
+ * in 32 or 64 bit mode. The meaning of the other bits depends on that.
+ */
+#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
+#define ARM_TBFLAG_AARCH64_STATE_MASK (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)
+
+/* Bit usage when in AArch32 state: */
#define ARM_TBFLAG_THUMB_SHIFT 0
#define ARM_TBFLAG_THUMB_MASK (1 << ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN_SHIFT 1
@@ -772,9 +838,12 @@ static inline int cpu_mmu_index (CPUARMState *env)
#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_BSWAP_CODE_SHIFT 16
#define ARM_TBFLAG_BSWAP_CODE_MASK (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT)
-/* Bits 31..17 are currently unused. */
+
+/* Bit usage when in AArch64 state: currently no bits defined */
/* some convenience accessor macros */
+#define ARM_TBFLAG_AARCH64_STATE(F) \
+ (((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_THUMB(F) \
(((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN(F) \
@@ -793,25 +862,31 @@ static inline int cpu_mmu_index (CPUARMState *env)
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
- int privmode;
- *pc = env->regs[15];
- *cs_base = 0;
- *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
- | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
- | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
- | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
- | (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT);
- if (arm_feature(env, ARM_FEATURE_M)) {
- privmode = !((env->v7m.exception == 0) && (env->v7m.control & 1));
+ if (is_a64(env)) {
+ *pc = env->pc;
+ *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
} else {
- privmode = (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR;
- }
- if (privmode) {
- *flags |= ARM_TBFLAG_PRIV_MASK;
- }
- if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
- *flags |= ARM_TBFLAG_VFPEN_MASK;
+ int privmode;
+ *pc = env->regs[15];
+ *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
+ | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
+ | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
+ | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
+ | (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT);
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ privmode = !((env->v7m.exception == 0) && (env->v7m.control & 1));
+ } else {
+ privmode = (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR;
+ }
+ if (privmode) {
+ *flags |= ARM_TBFLAG_PRIV_MASK;
+ }
+ if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
+ *flags |= ARM_TBFLAG_VFPEN_MASK;
+ }
}
+
+ *cs_base = 0;
}
static inline bool cpu_has_work(CPUState *cpu)
@@ -822,8 +897,17 @@ static inline bool cpu_has_work(CPUState *cpu)
#include "exec/exec-all.h"
+static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb)
+{
+ if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
+ env->pc = tb->pc;
+ } else {
+ env->regs[15] = tb->pc;
+ }
+}
+
/* Load an instruction and return it in the standard little-endian order */
-static inline uint32_t arm_ldl_code(CPUARMState *env, uint32_t addr,
+static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr,
bool do_swap)
{
uint32_t insn = cpu_ldl_code(env, addr);
@@ -834,7 +918,7 @@ static inline uint32_t arm_ldl_code(CPUARMState *env, uint32_t addr,
}
/* Ditto, for a halfword (Thumb) instruction */
-static inline uint16_t arm_lduw_code(CPUARMState *env, uint32_t addr,
+static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
bool do_swap)
{
uint16_t insn = cpu_lduw_code(env, addr);
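
A sketch of how the new bit-31 TB flag is meant to be consumed; this mirrors the check added to gen_intermediate_code_internal() in translate.c further below, and the helper name here is illustrative only (assumes "cpu.h" is included):

static inline bool tb_flags_describe_aarch64(CPUARMState *env)
{
    target_ulong pc, cs_base;
    int flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    /* Bit 31 set: A64 state, pc came from env->pc and no AArch32 bits are valid.
     * Bit 31 clear: AArch32 state, Thumb/VFP/condexec bits apply. */
    return ARM_TBFLAG_AARCH64_STATE(flags) != 0;
}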
diff --git a/target-arm/cpu64.c b/target-arm/cpu64.c
new file mode 100644
index 0000000000..3e99c2140a
--- /dev/null
+++ b/target-arm/cpu64.c
@@ -0,0 +1,118 @@
+/*
+ * QEMU AArch64 CPU
+ *
+ * Copyright (c) 2013 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+
+#include "cpu.h"
+#include "qemu-common.h"
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/loader.h"
+#endif
+#include "hw/arm/arm.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+
+static inline void set_feature(CPUARMState *env, int feature)
+{
+ env->features |= 1ULL << feature;
+}
+
+#ifdef CONFIG_USER_ONLY
+static void aarch64_any_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
+ set_feature(&cpu->env, ARM_FEATURE_VFP_FP16);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
+ set_feature(&cpu->env, ARM_FEATURE_V7MP);
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
+#endif
+
+typedef struct ARMCPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+ void (*class_init)(ObjectClass *oc, void *data);
+} ARMCPUInfo;
+
+static const ARMCPUInfo aarch64_cpus[] = {
+#ifdef CONFIG_USER_ONLY
+ { .name = "any", .initfn = aarch64_any_initfn },
+#endif
+};
+
+static void aarch64_cpu_initfn(Object *obj)
+{
+}
+
+static void aarch64_cpu_finalizefn(Object *obj)
+{
+}
+
+static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+
+ cc->dump_state = aarch64_cpu_dump_state;
+ cc->gdb_read_register = aarch64_cpu_gdb_read_register;
+ cc->gdb_write_register = aarch64_cpu_gdb_write_register;
+ cc->gdb_num_core_regs = 34;
+ cc->gdb_core_xml_file = "aarch64-core.xml";
+}
+
+static void aarch64_cpu_register(const ARMCPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_AARCH64_CPU,
+ .instance_size = sizeof(ARMCPU),
+ .instance_init = info->initfn,
+ .class_size = sizeof(ARMCPUClass),
+ .class_init = info->class_init,
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+}
+
+static const TypeInfo aarch64_cpu_type_info = {
+ .name = TYPE_AARCH64_CPU,
+ .parent = TYPE_ARM_CPU,
+ .instance_size = sizeof(ARMCPU),
+ .instance_init = aarch64_cpu_initfn,
+ .instance_finalize = aarch64_cpu_finalizefn,
+ .abstract = true,
+ .class_size = sizeof(AArch64CPUClass),
+ .class_init = aarch64_cpu_class_init,
+};
+
+static void aarch64_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&aarch64_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(aarch64_cpus); i++) {
+ aarch64_cpu_register(&aarch64_cpus[i]);
+ }
+}
+
+type_init(aarch64_cpu_register_types)
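
For reference, adding a named 64-bit CPU model later only requires another ARMCPUInfo entry; a hedged sketch (the "example" model name and its feature set are illustrative, not part of this patch):

static void aarch64_example_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
}

/* It would then be listed in aarch64_cpus[], e.g.
 *   { .name = "example", .initfn = aarch64_example_initfn },
 * and picked up by aarch64_cpu_register_types() at startup. */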
diff --git a/target-arm/gdbstub64.c b/target-arm/gdbstub64.c
new file mode 100644
index 0000000000..7cb6a7c0e0
--- /dev/null
+++ b/target-arm/gdbstub64.c
@@ -0,0 +1,73 @@
+/*
+ * ARM gdb server stub: AArch64 specific functions.
+ *
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "config.h"
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+
+int aarch64_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (n < 31) {
+ /* Core integer register. */
+ return gdb_get_reg64(mem_buf, env->xregs[n]);
+ }
+ switch (n) {
+ case 31:
+ return gdb_get_reg64(mem_buf, env->xregs[31]);
+ break;
+ case 32:
+ return gdb_get_reg64(mem_buf, env->pc);
+ break;
+ case 33:
+ return gdb_get_reg32(mem_buf, env->pstate);
+ }
+ /* Unknown register. */
+ return 0;
+}
+
+int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint64_t tmp;
+
+ tmp = ldq_p(mem_buf);
+
+ if (n < 31) {
+ /* Core integer register. */
+ env->xregs[n] = tmp;
+ return 8;
+ }
+ switch (n) {
+ case 31:
+ env->xregs[31] = tmp;
+ return 8;
+ case 32:
+ env->pc = tmp;
+ return 8;
+ case 33:
+ /* CPSR */
+ env->pstate = tmp;
+ return 4;
+ }
+ /* Unknown register. */
+ return 0;
+}
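
The register numbering assumed by this stub matches gdb_num_core_regs = 34 and aarch64-core.xml set in cpu64.c; the enum below is purely illustrative (the constant names are not from the patch):

enum {
    A64_GDB_X0     = 0,   /* x0..x30 map to 0..30 (env->xregs[n], 64-bit) */
    A64_GDB_SP     = 31,  /* env->xregs[31], 64-bit */
    A64_GDB_PC     = 32,  /* env->pc, 64-bit */
    A64_GDB_PSTATE = 33,  /* env->pstate, 32-bit (sent in the CPSR slot) */
    A64_GDB_NUM_CORE_REGS = 34
};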
diff --git a/target-arm/helper.c b/target-arm/helper.c
index e51ef20aea..2a98be7436 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -2,6 +2,7 @@
#include "exec/gdbstub.h"
#include "helper.h"
#include "qemu/host-utils.h"
+#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
@@ -972,7 +973,7 @@ static int par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
static inline bool extended_addresses_enabled(CPUARMState *env)
{
return arm_feature(env, ARM_FEATURE_LPAE)
- && (env->cp15.c2_control & (1 << 31));
+ && (env->cp15.c2_control & (1U << 31));
}
static int ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
@@ -1385,7 +1386,7 @@ static int mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri,
* so these bits always RAZ.
*/
if (arm_feature(env, ARM_FEATURE_V7MP)) {
- mpidr |= (1 << 31);
+ mpidr |= (1U << 31);
/* Cores which are uniprocessor (non-coherent)
* but still implement the MP extensions set
* bit 30. (For instance, A9UP.) However we do
@@ -1829,6 +1830,37 @@ void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
g_slist_free(list);
}
+static void arm_cpu_add_definition(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CpuDefinitionInfoList **cpu_list = user_data;
+ CpuDefinitionInfoList *entry;
+ CpuDefinitionInfo *info;
+ const char *typename;
+
+ typename = object_class_get_name(oc);
+ info = g_malloc0(sizeof(*info));
+ info->name = g_strndup(typename,
+ strlen(typename) - strlen("-" TYPE_ARM_CPU));
+
+ entry = g_malloc0(sizeof(*entry));
+ entry->value = info;
+ entry->next = *cpu_list;
+ *cpu_list = entry;
+}
+
+CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *cpu_list = NULL;
+ GSList *list;
+
+ list = object_class_get_list(TYPE_ARM_CPU, false);
+ g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
+ g_slist_free(list);
+
+ return cpu_list;
+}
+
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
const ARMCPRegInfo *r, void *opaque)
{
diff --git a/target-arm/machine.c b/target-arm/machine.c
index 5b6f3754ca..74f010f637 100644
--- a/target-arm/machine.c
+++ b/target-arm/machine.c
@@ -37,11 +37,11 @@ static const VMStateInfo vmstate_fpscr = {
static const VMStateDescription vmstate_vfp = {
.name = "cpu/vfp",
- .version_id = 2,
- .minimum_version_id = 2,
- .minimum_version_id_old = 2,
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .minimum_version_id_old = 3,
.fields = (VMStateField[]) {
- VMSTATE_FLOAT64_ARRAY(env.vfp.regs, ARMCPU, 32),
+ VMSTATE_FLOAT64_ARRAY(env.vfp.regs, ARMCPU, 64),
/* The xregs array is a little awkward because element 1 (FPSCR)
* requires a specific accessor, so we have to split it up in
* the vmstate:
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
new file mode 100644
index 0000000000..f120088607
--- /dev/null
+++ b/target-arm/translate-a64.c
@@ -0,0 +1,139 @@
+/*
+ * AArch64 translation
+ *
+ * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "cpu.h"
+#include "tcg-op.h"
+#include "qemu/log.h"
+#include "translate.h"
+#include "qemu/host-utils.h"
+
+#include "helper.h"
+#define GEN_HELPER 1
+#include "helper.h"
+
+static TCGv_i64 cpu_X[32];
+static TCGv_i64 cpu_pc;
+static TCGv_i32 pstate;
+
+static const char *regnames[] = {
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+ "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
+ "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
+};
+
+/* initialize TCG globals. */
+void a64_translate_init(void)
+{
+ int i;
+
+ cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUARMState, pc),
+ "pc");
+ for (i = 0; i < 32; i++) {
+ cpu_X[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUARMState, xregs[i]),
+ regnames[i]);
+ }
+
+ pstate = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUARMState, pstate),
+ "pstate");
+}
+
+void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ int i;
+
+ cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
+ env->pc, env->xregs[31]);
+ for (i = 0; i < 31; i++) {
+ cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
+ if ((i % 4) == 3) {
+ cpu_fprintf(f, "\n");
+ } else {
+ cpu_fprintf(f, " ");
+ }
+ }
+ cpu_fprintf(f, "PSTATE=%c%c%c%c\n",
+ env->pstate & PSTATE_N ? 'n' : '.',
+ env->pstate & PSTATE_Z ? 'z' : '.',
+ env->pstate & PSTATE_C ? 'c' : '.',
+ env->pstate & PSTATE_V ? 'v' : '.');
+ cpu_fprintf(f, "\n");
+}
+
+void gen_a64_set_pc_im(uint64_t val)
+{
+ tcg_gen_movi_i64(cpu_pc, val);
+}
+
+static void gen_exception(int excp)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, excp);
+ gen_helper_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_exception_insn(DisasContext *s, int offset, int excp)
+{
+ gen_a64_set_pc_im(s->pc - offset);
+ gen_exception(excp);
+ s->is_jmp = DISAS_JUMP;
+}
+
+static void real_unallocated_encoding(DisasContext *s)
+{
+ fprintf(stderr, "Unknown instruction: %#x\n", s->insn);
+ gen_exception_insn(s, 4, EXCP_UDEF);
+}
+
+#define unallocated_encoding(s) do { \
+ fprintf(stderr, "unallocated encoding at line: %d\n", __LINE__); \
+ real_unallocated_encoding(s); \
+ } while (0)
+
+void disas_a64_insn(CPUARMState *env, DisasContext *s)
+{
+ uint32_t insn;
+
+ insn = arm_ldl_code(env, s->pc, s->bswap_code);
+ s->insn = insn;
+ s->pc += 4;
+
+ switch ((insn >> 24) & 0x1f) {
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+
+ if (unlikely(s->singlestep_enabled) && (s->is_jmp == DISAS_TB_JUMP)) {
+ /* go through the main loop for single step */
+ s->is_jmp = DISAS_JUMP;
+ }
+}
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 4f4a0a97d2..998bde268d 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -28,6 +28,7 @@
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
+#include "qemu/bitops.h"
#include "helper.h"
#define GEN_HELPER 1
@@ -46,29 +47,7 @@
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
-/* internal defines */
-typedef struct DisasContext {
- target_ulong pc;
- int is_jmp;
- /* Nonzero if this instruction has been conditionally skipped. */
- int condjmp;
- /* The label that will be jumped to when the instruction is skipped. */
- int condlabel;
- /* Thumb-2 conditional execution bits. */
- int condexec_mask;
- int condexec_cond;
- struct TranslationBlock *tb;
- int singlestep_enabled;
- int thumb;
- int bswap_code;
-#if !defined(CONFIG_USER_ONLY)
- int user;
-#endif
- int vfp_enabled;
- int vec_len;
- int vec_stride;
-} DisasContext;
-
+#include "translate.h"
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#if defined(CONFIG_USER_ONLY)
@@ -82,7 +61,7 @@ static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#define DISAS_WFI 4
#define DISAS_SWI 5
-static TCGv_ptr cpu_env;
+TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
@@ -135,6 +114,8 @@ void arm_translate_init(void)
offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
+ a64_translate_init();
+
#define GEN_HELPER 2
#include "helper.h"
}
@@ -842,9 +823,97 @@ static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
}
}
-static inline void gen_set_pc_im(uint32_t val)
+/* Abstractions of "generate code to do a guest load/store for
+ * AArch32", where a vaddr is always 32 bits (and is zero
+ * extended if we're a 64 bit core) and data is also
+ * 32 bits unless specifically doing a 64 bit access.
+ * These functions work like tcg_gen_qemu_{ld,st}* except
+ * that their arguments are TCGv_i32 rather than TCGv.
+ */
+#if TARGET_LONG_BITS == 32
+
+#define DO_GEN_LD(OP) \
+static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
+{ \
+ tcg_gen_qemu_##OP(val, addr, index); \
+}
+
+#define DO_GEN_ST(OP) \
+static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
+{ \
+ tcg_gen_qemu_##OP(val, addr, index); \
+}
+
+static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
+{
+ tcg_gen_qemu_ld64(val, addr, index);
+}
+
+static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
+{
+ tcg_gen_qemu_st64(val, addr, index);
+}
+
+#else
+
+#define DO_GEN_LD(OP) \
+static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
+{ \
+ TCGv addr64 = tcg_temp_new(); \
+ TCGv val64 = tcg_temp_new(); \
+ tcg_gen_extu_i32_i64(addr64, addr); \
+ tcg_gen_qemu_##OP(val64, addr64, index); \
+ tcg_temp_free(addr64); \
+ tcg_gen_trunc_i64_i32(val, val64); \
+ tcg_temp_free(val64); \
+}
+
+#define DO_GEN_ST(OP) \
+static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
+{ \
+ TCGv addr64 = tcg_temp_new(); \
+ TCGv val64 = tcg_temp_new(); \
+ tcg_gen_extu_i32_i64(addr64, addr); \
+ tcg_gen_extu_i32_i64(val64, val); \
+ tcg_gen_qemu_##OP(val64, addr64, index); \
+ tcg_temp_free(addr64); \
+ tcg_temp_free(val64); \
+}
+
+static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
+{
+ TCGv addr64 = tcg_temp_new();
+ tcg_gen_extu_i32_i64(addr64, addr);
+ tcg_gen_qemu_ld64(val, addr64, index);
+ tcg_temp_free(addr64);
+}
+
+static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
- tcg_gen_movi_i32(cpu_R[15], val);
+ TCGv addr64 = tcg_temp_new();
+ tcg_gen_extu_i32_i64(addr64, addr);
+ tcg_gen_qemu_st64(val, addr64, index);
+ tcg_temp_free(addr64);
+}
+
+#endif
+
+DO_GEN_LD(ld8s)
+DO_GEN_LD(ld8u)
+DO_GEN_LD(ld16s)
+DO_GEN_LD(ld16u)
+DO_GEN_LD(ld32u)
+DO_GEN_ST(st8)
+DO_GEN_ST(st16)
+DO_GEN_ST(st32)
+
+static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
+{
+ if (s->aarch64) {
+ gen_a64_set_pc_im(val);
+ } else {
+ tcg_gen_movi_i32(cpu_R[15], val);
+ }
}
/* Force a TB lookup after an instruction that changes the CPU state. */
@@ -1071,18 +1140,20 @@ VFP_GEN_FIX(ulto)
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
- if (dp)
- tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
- else
- tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
+ if (dp) {
+ gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
+ } else {
+ gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
+ }
}
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
- if (dp)
- tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
- else
- tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
+ if (dp) {
+ gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
+ } else {
+ gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
+ }
}
static inline long
@@ -1419,24 +1490,24 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
if (insn & ARM_CP_RW_BIT) {
if ((insn >> 28) == 0xf) { /* WLDRW wCx */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
iwmmxt_store_creg(wrd, tmp);
} else {
i = 1;
if (insn & (1 << 8)) {
if (insn & (1 << 22)) { /* WLDRD */
- tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
+ gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
i = 0;
} else { /* WLDRW wRd */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
}
} else {
tmp = tcg_temp_new_i32();
if (insn & (1 << 22)) { /* WLDRH */
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
} else { /* WLDRB */
- tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, IS_USER(s));
}
}
if (i) {
@@ -1448,24 +1519,24 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
} else {
if ((insn >> 28) == 0xf) { /* WSTRW wCx */
tmp = iwmmxt_load_creg(wrd);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
} else {
gen_op_iwmmxt_movq_M0_wRn(wrd);
tmp = tcg_temp_new_i32();
if (insn & (1 << 8)) {
if (insn & (1 << 22)) { /* WSTRD */
- tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
+ gen_aa32_st64(cpu_M0, addr, IS_USER(s));
} else { /* WSTRW wRd */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
}
} else {
if (insn & (1 << 22)) { /* WSTRH */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, IS_USER(s));
} else { /* WSTRB */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, IS_USER(s));
}
}
}
@@ -2530,15 +2601,15 @@ static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
TCGv_i32 tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, IS_USER(s));
gen_neon_dup_u8(tmp, 0);
break;
case 1:
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
gen_neon_dup_low16(tmp);
break;
case 2:
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
break;
default: /* Avoid compiler warnings. */
abort();
@@ -3348,17 +3419,17 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
return 0;
}
-static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
+static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
TranslationBlock *tb;
tb = s->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
tcg_gen_goto_tb(n);
- gen_set_pc_im(dest);
+ gen_set_pc_im(s, dest);
tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
- gen_set_pc_im(dest);
+ gen_set_pc_im(s, dest);
tcg_gen_exit_tb(0);
}
}
@@ -3487,7 +3558,7 @@ gen_set_condexec (DisasContext *s)
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
gen_set_condexec(s);
- gen_set_pc_im(s->pc - offset);
+ gen_set_pc_im(s, s->pc - offset);
gen_exception(excp);
s->is_jmp = DISAS_JUMP;
}
@@ -3496,7 +3567,7 @@ static void gen_nop_hint(DisasContext *s, int val)
{
switch (val) {
case 3: /* wfi */
- gen_set_pc_im(s->pc);
+ gen_set_pc_im(s, s->pc);
s->is_jmp = DISAS_WFI;
break;
case 2: /* wfe */
@@ -3816,11 +3887,11 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
if (size == 3) {
tmp64 = tcg_temp_new_i64();
if (load) {
- tcg_gen_qemu_ld64(tmp64, addr, IS_USER(s));
+ gen_aa32_ld64(tmp64, addr, IS_USER(s));
neon_store_reg64(tmp64, rd);
} else {
neon_load_reg64(tmp64, rd);
- tcg_gen_qemu_st64(tmp64, addr, IS_USER(s));
+ gen_aa32_st64(tmp64, addr, IS_USER(s));
}
tcg_temp_free_i64(tmp64);
tcg_gen_addi_i32(addr, addr, stride);
@@ -3829,21 +3900,21 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
if (size == 2) {
if (load) {
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
neon_store_reg(rd, pass, tmp);
} else {
tmp = neon_load_reg(rd, pass);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, stride);
} else if (size == 1) {
if (load) {
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
tcg_gen_addi_i32(addr, addr, stride);
tmp2 = tcg_temp_new_i32();
- tcg_gen_qemu_ld16u(tmp2, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp2, addr, IS_USER(s));
tcg_gen_addi_i32(addr, addr, stride);
tcg_gen_shli_i32(tmp2, tmp2, 16);
tcg_gen_or_i32(tmp, tmp, tmp2);
@@ -3853,10 +3924,10 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
tmp = neon_load_reg(rd, pass);
tmp2 = tcg_temp_new_i32();
tcg_gen_shri_i32(tmp2, tmp, 16);
- tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, stride);
- tcg_gen_qemu_st16(tmp2, addr, IS_USER(s));
+ gen_aa32_st16(tmp2, addr, IS_USER(s));
tcg_temp_free_i32(tmp2);
tcg_gen_addi_i32(addr, addr, stride);
}
@@ -3865,7 +3936,7 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
TCGV_UNUSED_I32(tmp2);
for (n = 0; n < 4; n++) {
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, IS_USER(s));
tcg_gen_addi_i32(addr, addr, stride);
if (n == 0) {
tmp2 = tmp;
@@ -3885,7 +3956,7 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
} else {
tcg_gen_shri_i32(tmp, tmp2, n * 8);
}
- tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, stride);
}
@@ -4009,13 +4080,13 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, IS_USER(s));
break;
case 1:
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
break;
case 2:
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
break;
default: /* Avoid compiler warnings. */
abort();
@@ -4033,13 +4104,13 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
tcg_gen_shri_i32(tmp, tmp, shift);
switch (size) {
case 0:
- tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, IS_USER(s));
break;
case 1:
- tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, IS_USER(s));
break;
case 2:
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
break;
}
tcg_temp_free_i32(tmp);
@@ -6273,7 +6344,7 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
if (isread) {
return 1;
}
- gen_set_pc_im(s->pc);
+ gen_set_pc_im(s, s->pc);
s->is_jmp = DISAS_WFI;
return 0;
default:
@@ -6293,7 +6364,7 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
tmp64 = tcg_const_i64(ri->resetvalue);
} else if (ri->readfn) {
TCGv_ptr tmpptr;
- gen_set_pc_im(s->pc);
+ gen_set_pc_im(s, s->pc);
tmp64 = tcg_temp_new_i64();
tmpptr = tcg_const_ptr(ri);
gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
@@ -6316,7 +6387,7 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
tmp = tcg_const_i32(ri->resetvalue);
} else if (ri->readfn) {
TCGv_ptr tmpptr;
- gen_set_pc_im(s->pc);
+ gen_set_pc_im(s, s->pc);
tmp = tcg_temp_new_i32();
tmpptr = tcg_const_ptr(ri);
gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
@@ -6351,7 +6422,7 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
tcg_temp_free_i32(tmphi);
if (ri->writefn) {
TCGv_ptr tmpptr = tcg_const_ptr(ri);
- gen_set_pc_im(s->pc);
+ gen_set_pc_im(s, s->pc);
gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
tcg_temp_free_ptr(tmpptr);
} else {
@@ -6362,7 +6433,7 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
if (ri->writefn) {
TCGv_i32 tmp;
TCGv_ptr tmpptr;
- gen_set_pc_im(s->pc);
+ gen_set_pc_im(s, s->pc);
tmp = load_reg(s, rt);
tmpptr = tcg_const_ptr(ri);
gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
@@ -6463,14 +6534,14 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
switch (size) {
case 0:
- tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, IS_USER(s));
break;
case 1:
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
break;
case 2:
case 3:
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
break;
default:
abort();
@@ -6481,7 +6552,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
TCGv_i32 tmp2 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
+ gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
tcg_temp_free_i32(tmp2);
tcg_gen_mov_i32(cpu_exclusive_high, tmp);
store_reg(s, rt2, tmp);
@@ -6523,14 +6594,14 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, IS_USER(s));
break;
case 1:
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
break;
case 2:
case 3:
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
break;
default:
abort();
@@ -6541,7 +6612,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i32 tmp2 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
+ gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
tcg_temp_free_i32(tmp2);
tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
tcg_temp_free_i32(tmp);
@@ -6549,14 +6620,14 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
tmp = load_reg(s, rt);
switch (size) {
case 0:
- tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, IS_USER(s));
break;
case 1:
- tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, IS_USER(s));
break;
case 2:
case 3:
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
break;
default:
abort();
@@ -6565,7 +6636,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
if (size == 3) {
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rt2);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_movi_i32(cpu_R[rd], 0);
@@ -6612,11 +6683,11 @@ static void gen_srs(DisasContext *s,
}
tcg_gen_addi_i32(addr, addr, offset);
tmp = load_reg(s, 14);
- tcg_gen_qemu_st32(tmp, addr, 0);
+ gen_aa32_st32(tmp, addr, 0);
tcg_temp_free_i32(tmp);
tmp = load_cpu_field(spsr);
tcg_gen_addi_i32(addr, addr, 4);
- tcg_gen_qemu_st32(tmp, addr, 0);
+ gen_aa32_st32(tmp, addr, 0);
tcg_temp_free_i32(tmp);
if (writeback) {
switch (amode) {
@@ -6762,10 +6833,10 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
tcg_gen_addi_i32(addr, addr, offset);
/* Load PC into tmp and CPSR into tmp2. */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, 0);
+ gen_aa32_ld32u(tmp, addr, 0);
tcg_gen_addi_i32(addr, addr, 4);
tmp2 = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp2, addr, 0);
+ gen_aa32_ld32u(tmp2, addr, 0);
if (insn & (1 << 21)) {
/* Base writeback. */
switch (i) {
@@ -7321,13 +7392,13 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
tmp = tcg_temp_new_i32();
switch (op1) {
case 0: /* lda */
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
break;
case 2: /* ldab */
- tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, IS_USER(s));
break;
case 3: /* ldah */
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
break;
default:
abort();
@@ -7338,13 +7409,13 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
tmp = load_reg(s, rm);
switch (op1) {
case 0: /* stl */
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
break;
case 2: /* stlb */
- tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, IS_USER(s));
break;
case 3: /* stlh */
- tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, IS_USER(s));
break;
default:
abort();
@@ -7399,11 +7470,11 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
tmp = load_reg(s, rm);
tmp2 = tcg_temp_new_i32();
if (insn & (1 << 22)) {
- tcg_gen_qemu_ld8u(tmp2, addr, IS_USER(s));
- tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp2, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, IS_USER(s));
} else {
- tcg_gen_qemu_ld32u(tmp2, addr, IS_USER(s));
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp2, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
}
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(addr);
@@ -7425,14 +7496,14 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
tmp = tcg_temp_new_i32();
switch(sh) {
case 1:
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
break;
case 2:
- tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
+ gen_aa32_ld8s(tmp, addr, IS_USER(s));
break;
default:
case 3:
- tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
+ gen_aa32_ld16s(tmp, addr, IS_USER(s));
break;
}
load = 1;
@@ -7442,21 +7513,21 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if (sh & 1) {
/* store */
tmp = load_reg(s, rd);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rd + 1);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
load = 0;
} else {
/* load */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
store_reg(s, rd, tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
rd++;
load = 1;
}
@@ -7464,7 +7535,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
} else {
/* store */
tmp = load_reg(s, rd);
- tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
load = 0;
}
@@ -7797,17 +7868,17 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
/* load */
tmp = tcg_temp_new_i32();
if (insn & (1 << 22)) {
- tcg_gen_qemu_ld8u(tmp, tmp2, i);
+ gen_aa32_ld8u(tmp, tmp2, i);
} else {
- tcg_gen_qemu_ld32u(tmp, tmp2, i);
+ gen_aa32_ld32u(tmp, tmp2, i);
}
} else {
/* store */
tmp = load_reg(s, rd);
if (insn & (1 << 22)) {
- tcg_gen_qemu_st8(tmp, tmp2, i);
+ gen_aa32_st8(tmp, tmp2, i);
} else {
- tcg_gen_qemu_st32(tmp, tmp2, i);
+ gen_aa32_st32(tmp, tmp2, i);
}
tcg_temp_free_i32(tmp);
}
@@ -7874,7 +7945,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if (insn & (1 << 20)) {
/* load */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
if (user) {
tmp2 = tcg_const_i32(i);
gen_helper_set_user_reg(cpu_env, tmp2, tmp);
@@ -7901,7 +7972,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
} else {
tmp = load_reg(s, i);
}
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
j++;
@@ -7957,8 +8028,8 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
tcg_gen_movi_i32(tmp, val);
store_reg(s, 14, tmp);
}
- offset = (((int32_t)insn << 8) >> 8);
- val += (offset << 2) + 4;
+ offset = sextract32(insn << 2, 0, 26);
+ val += offset + 4;
gen_jmp(s, val);
}
break;
@@ -7971,7 +8042,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
break;
case 0xf:
/* swi */
- gen_set_pc_im(s->pc);
+ gen_set_pc_im(s, s->pc);
s->is_jmp = DISAS_SWI;
break;
default:
@@ -8160,20 +8231,20 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
if (insn & (1 << 20)) {
/* ldrd */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
store_reg(s, rs, tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
store_reg(s, rd, tmp);
} else {
/* strd */
tmp = load_reg(s, rs);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rd);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
if (insn & (1 << 21)) {
@@ -8211,11 +8282,11 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tcg_gen_add_i32(addr, addr, tmp);
tcg_temp_free_i32(tmp);
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
} else { /* tbb */
tcg_temp_free_i32(tmp);
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, IS_USER(s));
}
tcg_temp_free_i32(addr);
tcg_gen_shli_i32(tmp, tmp, 1);
@@ -8252,13 +8323,13 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = tcg_temp_new_i32();
switch (op) {
case 0: /* ldab */
- tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, IS_USER(s));
break;
case 1: /* ldah */
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
break;
case 2: /* lda */
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
break;
default:
abort();
@@ -8268,13 +8339,13 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = load_reg(s, rs);
switch (op) {
case 0: /* stlb */
- tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, IS_USER(s));
break;
case 1: /* stlh */
- tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, IS_USER(s));
break;
case 2: /* stl */
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
break;
default:
abort();
@@ -8302,10 +8373,10 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tcg_gen_addi_i32(addr, addr, -8);
/* Load PC into tmp and CPSR into tmp2. */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, 0);
+ gen_aa32_ld32u(tmp, addr, 0);
tcg_gen_addi_i32(addr, addr, 4);
tmp2 = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp2, addr, 0);
+ gen_aa32_ld32u(tmp2, addr, 0);
if (insn & (1 << 21)) {
/* Base writeback. */
if (insn & (1 << 24)) {
@@ -8344,7 +8415,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
if (insn & (1 << 20)) {
/* Load. */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
if (i == 15) {
gen_bx(s, tmp);
} else if (i == rn) {
@@ -8356,7 +8427,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
} else {
/* Store. */
tmp = load_reg(s, i);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, 4);
@@ -9134,19 +9205,19 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = tcg_temp_new_i32();
switch (op) {
case 0:
- tcg_gen_qemu_ld8u(tmp, addr, user);
+ gen_aa32_ld8u(tmp, addr, user);
break;
case 4:
- tcg_gen_qemu_ld8s(tmp, addr, user);
+ gen_aa32_ld8s(tmp, addr, user);
break;
case 1:
- tcg_gen_qemu_ld16u(tmp, addr, user);
+ gen_aa32_ld16u(tmp, addr, user);
break;
case 5:
- tcg_gen_qemu_ld16s(tmp, addr, user);
+ gen_aa32_ld16s(tmp, addr, user);
break;
case 2:
- tcg_gen_qemu_ld32u(tmp, addr, user);
+ gen_aa32_ld32u(tmp, addr, user);
break;
default:
tcg_temp_free_i32(tmp);
@@ -9163,13 +9234,13 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = load_reg(s, rs);
switch (op) {
case 0:
- tcg_gen_qemu_st8(tmp, addr, user);
+ gen_aa32_st8(tmp, addr, user);
break;
case 1:
- tcg_gen_qemu_st16(tmp, addr, user);
+ gen_aa32_st16(tmp, addr, user);
break;
case 2:
- tcg_gen_qemu_st32(tmp, addr, user);
+ gen_aa32_st32(tmp, addr, user);
break;
default:
tcg_temp_free_i32(tmp);
@@ -9306,7 +9377,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
addr = tcg_temp_new_i32();
tcg_gen_movi_i32(addr, val);
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
tcg_temp_free_i32(addr);
store_reg(s, rd, tmp);
break;
@@ -9509,28 +9580,28 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
switch (op) {
case 0: /* str */
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
break;
case 1: /* strh */
- tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, IS_USER(s));
break;
case 2: /* strb */
- tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, IS_USER(s));
break;
case 3: /* ldrsb */
- tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
+ gen_aa32_ld8s(tmp, addr, IS_USER(s));
break;
case 4: /* ldr */
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
break;
case 5: /* ldrh */
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
break;
case 6: /* ldrb */
- tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, IS_USER(s));
break;
case 7: /* ldrsh */
- tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
+ gen_aa32_ld16s(tmp, addr, IS_USER(s));
break;
}
if (op >= 3) { /* load */
@@ -9552,12 +9623,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -9574,12 +9645,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, IS_USER(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -9596,12 +9667,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, IS_USER(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -9617,12 +9688,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -9690,12 +9761,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* pop */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
store_reg(s, i, tmp);
} else {
/* push */
tmp = load_reg(s, i);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
/* advance to the next address. */
@@ -9707,13 +9778,13 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* pop pc */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
/* don't set the pc until the rest of the instruction
has completed */
} else {
/* push lr */
tmp = load_reg(s, 14);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, 4);
@@ -9839,7 +9910,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, IS_USER(s));
if (i == rn) {
loaded_var = tmp;
} else {
@@ -9848,7 +9919,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
} else {
/* store */
tmp = load_reg(s, i);
- tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, IS_USER(s));
tcg_temp_free_i32(tmp);
}
/* advance to the next address */
@@ -9875,7 +9946,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (cond == 0xf) {
/* swi */
- gen_set_pc_im(s->pc);
+ gen_set_pc_im(s, s->pc);
s->is_jmp = DISAS_SWI;
break;
}
@@ -9932,7 +10003,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
- uint32_t next_page_start;
+ target_ulong next_page_start;
int num_insns;
int max_insns;
@@ -9947,16 +10018,32 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->pc = pc_start;
dc->singlestep_enabled = cs->singlestep_enabled;
dc->condjmp = 0;
- dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
- dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
- dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
- dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
+
+ if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
+ dc->aarch64 = 1;
+ dc->thumb = 0;
+ dc->bswap_code = 0;
+ dc->condexec_mask = 0;
+ dc->condexec_cond = 0;
#if !defined(CONFIG_USER_ONLY)
- dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
+ dc->user = 0;
#endif
- dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
- dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
- dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
+ dc->vfp_enabled = 0;
+ dc->vec_len = 0;
+ dc->vec_stride = 0;
+ } else {
+ dc->aarch64 = 0;
+ dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
+ dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
+ dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
+ dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
+#if !defined(CONFIG_USER_ONLY)
+ dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
+#endif
+ dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
+ dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
+ dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
+ }
cpu_F0s = tcg_temp_new_i32();
cpu_F1s = tcg_temp_new_i32();
cpu_F0d = tcg_temp_new_i64();
@@ -10018,7 +10105,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
do {
#ifdef CONFIG_USER_ONLY
/* Intercept jump to the magic kernel page. */
- if (dc->pc >= 0xffff0000) {
+ if (!dc->aarch64 && dc->pc >= 0xffff0000) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception(EXCP_KERNEL_TRAP);
@@ -10066,7 +10153,9 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
tcg_gen_debug_insn_start(dc->pc);
}
- if (dc->thumb) {
+ if (dc->aarch64) {
+ disas_a64_insn(env, dc);
+ } else if (dc->thumb) {
disas_thumb_insn(env, dc);
if (dc->condexec_mask) {
dc->condexec_cond = (dc->condexec_cond & 0xe)
@@ -10086,7 +10175,8 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
}
if (tcg_check_temp_count()) {
- fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
+ fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
+ dc->pc);
}
/* Translation stops when a conditional branch is encountered.
@@ -10124,7 +10214,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
gen_set_label(dc->condlabel);
}
if (dc->condjmp || !dc->is_jmp) {
- gen_set_pc_im(dc->pc);
+ gen_set_pc_im(dc, dc->pc);
dc->condjmp = 0;
}
gen_set_condexec(dc);
@@ -10258,6 +10348,10 @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
- env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
+ if (is_a64(env)) {
+ env->pc = tcg_ctx.gen_opc_pc[pc_pos];
+ } else {
+ env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
+ }
env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}
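
To make the DO_GEN_LD/DO_GEN_ST abstraction above concrete, this is the hand-expanded form of DO_GEN_LD(ld32u) for the TARGET_LONG_BITS == 64 case (illustration only; the real definition is produced by the macro in translate.c):

static inline void gen_aa32_ld32u(TCGv_i32 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    TCGv val64 = tcg_temp_new();

    tcg_gen_extu_i32_i64(addr64, addr);        /* zero-extend the 32-bit vaddr */
    tcg_gen_qemu_ld32u(val64, addr64, index);  /* guest load with a 64-bit address */
    tcg_temp_free(addr64);
    tcg_gen_trunc_i64_i32(val, val64);         /* narrow the result back to 32 bits */
    tcg_temp_free(val64);
}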
diff --git a/target-arm/translate.h b/target-arm/translate.h
new file mode 100644
index 0000000000..67c776053b
--- /dev/null
+++ b/target-arm/translate.h
@@ -0,0 +1,49 @@
+#ifndef TARGET_ARM_TRANSLATE_H
+#define TARGET_ARM_TRANSLATE_H
+
+/* internal defines */
+typedef struct DisasContext {
+ target_ulong pc;
+ uint32_t insn;
+ int is_jmp;
+ /* Nonzero if this instruction has been conditionally skipped. */
+ int condjmp;
+ /* The label that will be jumped to when the instruction is skipped. */
+ int condlabel;
+ /* Thumb-2 conditional execution bits. */
+ int condexec_mask;
+ int condexec_cond;
+ struct TranslationBlock *tb;
+ int singlestep_enabled;
+ int thumb;
+ int bswap_code;
+#if !defined(CONFIG_USER_ONLY)
+ int user;
+#endif
+ int vfp_enabled;
+ int vec_len;
+ int vec_stride;
+ int aarch64;
+} DisasContext;
+
+extern TCGv_ptr cpu_env;
+
+#ifdef TARGET_AARCH64
+void a64_translate_init(void);
+void disas_a64_insn(CPUARMState *env, DisasContext *s);
+void gen_a64_set_pc_im(uint64_t val);
+#else
+static inline void a64_translate_init(void)
+{
+}
+
+static inline void disas_a64_insn(CPUARMState *env, DisasContext *s)
+{
+}
+
+static inline void gen_a64_set_pc_im(uint64_t val)
+{
+}
+#endif
+
+#endif /* TARGET_ARM_TRANSLATE_H */