-rwxr-xr-x  configure | 4
-rw-r--r--  cpu-exec.c | 5
-rw-r--r--  cpus.c | 2
-rw-r--r--  cputlb.c | 7
-rw-r--r--  default-configs/arm-softmmu.mak | 1
-rw-r--r--  disas.c | 14
-rw-r--r--  disas/Makefile.objs | 5
-rw-r--r--  disas/arm-a64.cc | 87
-rw-r--r--  disas/libvixl/LICENCE | 30
-rw-r--r--  disas/libvixl/Makefile.objs | 8
-rw-r--r--  disas/libvixl/README | 12
-rw-r--r--  disas/libvixl/a64/assembler-a64.h | 1784
-rw-r--r--  disas/libvixl/a64/constants-a64.h | 1104
-rw-r--r--  disas/libvixl/a64/cpu-a64.h | 56
-rw-r--r--  disas/libvixl/a64/decoder-a64.cc | 712
-rw-r--r--  disas/libvixl/a64/decoder-a64.h | 198
-rw-r--r--  disas/libvixl/a64/disasm-a64.cc | 1678
-rw-r--r--  disas/libvixl/a64/disasm-a64.h | 109
-rw-r--r--  disas/libvixl/a64/instructions-a64.cc | 238
-rw-r--r--  disas/libvixl/a64/instructions-a64.h | 344
-rw-r--r--  disas/libvixl/globals.h | 65
-rw-r--r--  disas/libvixl/platform.h | 43
-rw-r--r--  disas/libvixl/utils.cc | 120
-rw-r--r--  disas/libvixl/utils.h | 126
-rw-r--r--  exec.c | 218
-rw-r--r--  hw/9pfs/cofile.c | 4
-rw-r--r--  hw/9pfs/virtio-9p-handle.c | 8
-rw-r--r--  hw/9pfs/virtio-9p-local.c | 10
-rw-r--r--  hw/9pfs/virtio-9p-proxy.c | 3
-rw-r--r--  hw/9pfs/virtio-9p.c | 12
-rw-r--r--  hw/alpha/dp264.c | 5
-rw-r--r--  hw/alpha/typhoon.c | 2
-rw-r--r--  hw/arm/allwinner-a10.c | 16
-rw-r--r--  hw/arm/boot.c | 5
-rw-r--r--  hw/arm/cubieboard.c | 11
-rw-r--r--  hw/arm/highbank.c | 6
-rw-r--r--  hw/core/loader.c | 3
-rw-r--r--  hw/display/sm501.c | 1
-rw-r--r--  hw/display/sm501_template.h | 2
-rw-r--r--  hw/dma/pl080.c | 9
-rw-r--r--  hw/dma/sun4m_iommu.c | 3
-rw-r--r--  hw/intc/apic.c | 3
-rw-r--r--  hw/intc/arm_gic.c | 179
-rw-r--r--  hw/intc/arm_gic_common.c | 8
-rw-r--r--  hw/intc/gic_internal.h | 16
-rw-r--r--  hw/microblaze/petalogix_ml605_mmu.c | 7
-rw-r--r--  hw/misc/zynq_slcr.c | 5
-rw-r--r--  hw/net/Makefile.objs | 1
-rw-r--r--  hw/net/allwinner_emac.c | 539
-rw-r--r--  hw/net/vmware_utils.h | 16
-rw-r--r--  hw/pci/msi.c | 2
-rw-r--r--  hw/pci/msix.c | 2
-rw-r--r--  hw/ppc/ppc405_uc.c | 45
-rw-r--r--  hw/ppc/spapr_hcall.c | 50
-rw-r--r--  hw/s390x/css.c | 11
-rw-r--r--  hw/s390x/s390-virtio-bus.c | 36
-rw-r--r--  hw/s390x/s390-virtio.c | 2
-rw-r--r--  hw/s390x/virtio-ccw.c | 40
-rw-r--r--  hw/scsi/megasas.c | 22
-rw-r--r--  hw/scsi/vmw_pvscsi.c | 6
-rw-r--r--  hw/sh4/r2d.c | 4
-rw-r--r--  hw/sparc/sun4m.c | 3
-rw-r--r--  hw/timer/hpet.c | 3
-rw-r--r--  hw/virtio/virtio.c | 31
-rw-r--r--  include/disas/bfd.h | 1
-rw-r--r--  include/exec/cpu-common.h | 44
-rw-r--r--  include/exec/exec-all.h | 5
-rw-r--r--  include/exec/memory.h | 2
-rw-r--r--  include/exec/softmmu_template.h | 7
-rw-r--r--  include/hw/arm/allwinner-a10.h | 3
-rw-r--r--  include/hw/intc/arm_gic_common.h | 33
-rw-r--r--  include/hw/net/allwinner_emac.h | 210
-rw-r--r--  include/hw/ppc/spapr.h | 4
-rw-r--r--  include/migration/vmstate.h | 6
-rw-r--r--  include/qemu/fifo8.h | 61
-rw-r--r--  include/qemu/typedefs.h | 1
-rw-r--r--  include/qom/cpu.h | 3
-rw-r--r--  monitor.c | 2
-rw-r--r--  rules.mak | 14
-rw-r--r--  target-alpha/helper.c | 7
-rw-r--r--  target-alpha/helper.h | 8
-rw-r--r--  target-alpha/mem_helper.c | 36
-rw-r--r--  target-alpha/translate.c | 8
-rw-r--r--  target-arm/helper.c | 21
-rw-r--r--  target-arm/helper.h | 1
-rw-r--r--  target-arm/neon_helper.c | 12
-rw-r--r--  target-arm/translate-a64.c | 1125
-rw-r--r--  target-arm/translate.c | 83
-rw-r--r--  target-i386/arch_memory_mapping.c | 46
-rw-r--r--  target-i386/helper.c | 48
-rw-r--r--  target-i386/seg_helper.c | 14
-rw-r--r--  target-i386/smm_helper.c | 300
-rw-r--r--  target-i386/svm_helper.c | 299
-rw-r--r--  target-ppc/excp_helper.c | 4
-rw-r--r--  target-ppc/mmu-hash32.h | 12
-rw-r--r--  target-ppc/mmu-hash64.h | 14
-rw-r--r--  target-s390x/cpu.c | 2
-rw-r--r--  target-s390x/helper.c | 11
-rw-r--r--  target-s390x/mem_helper.c | 9
-rw-r--r--  target-sparc/ldst_helper.c | 72
-rw-r--r--  target-sparc/mmu_helper.c | 22
-rw-r--r--  target-unicore32/softmmu.c | 5
-rw-r--r--  target-xtensa/helper.c | 3
-rw-r--r--  target-xtensa/op_helper.c | 3
-rw-r--r--  tcg/tcg.h | 3
-rw-r--r--  translate-all.c | 4
-rw-r--r--  util/fifo8.c | 47
107 files changed, 9908 insertions, 808 deletions
diff --git a/configure b/configure
index 5b20ce6139..88133a100e 100755
--- a/configure
+++ b/configure
@@ -4667,6 +4667,10 @@ for i in $ARCH $TARGET_BASE_ARCH ; do
arm)
echo "CONFIG_ARM_DIS=y" >> $config_target_mak
echo "CONFIG_ARM_DIS=y" >> config-all-disas.mak
+ if test -n "${cxx}"; then
+ echo "CONFIG_ARM_A64_DIS=y" >> $config_target_mak
+ echo "CONFIG_ARM_A64_DIS=y" >> config-all-disas.mak
+ fi
;;
cris)
echo "CONFIG_CRIS_DIS=y" >> $config_target_mak
diff --git a/cpu-exec.c b/cpu-exec.c
index a6c01f4193..8943493001 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -395,7 +395,10 @@ int cpu_exec(CPUArchState *env)
/* FIXME: this should respect TPR */
cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
0);
- intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
+ intno = ldl_phys(cpu->as,
+ env->vm_vmcb
+ + offsetof(struct vmcb,
+ control.int_vector));
qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
do_interrupt_x86_hardirq(env, intno, 1);
cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
diff --git a/cpus.c b/cpus.c
index ca4c59fe0b..945d85b326 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1119,6 +1119,8 @@ void resume_all_vcpus(void)
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
+ tcg_cpu_address_space_init(cpu, cpu->as);
+
/* share a single thread for all cpus with TCG */
if (!tcg_cpu_thread) {
cpu->thread = g_malloc0(sizeof(QemuThread));
diff --git a/cputlb.c b/cputlb.c
index b533f3f372..0fbaa39412 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -232,6 +232,7 @@ void tlb_set_page(CPUArchState *env, target_ulong vaddr,
uintptr_t addend;
CPUTLBEntry *te;
hwaddr iotlb, xlat, sz;
+ CPUState *cpu = ENV_GET_CPU(env);
assert(size >= TARGET_PAGE_SIZE);
if (size != TARGET_PAGE_SIZE) {
@@ -239,7 +240,7 @@ void tlb_set_page(CPUArchState *env, target_ulong vaddr,
}
sz = size;
- section = address_space_translate_for_iotlb(&address_space_memory, paddr,
+ section = address_space_translate_for_iotlb(cpu->as, paddr,
&xlat, &sz);
assert(sz >= TARGET_PAGE_SIZE);
@@ -305,6 +306,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
int mmu_idx, page_index, pd;
void *p;
MemoryRegion *mr;
+ CPUState *cpu = ENV_GET_CPU(env1);
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = cpu_mmu_index(env1);
@@ -313,9 +315,8 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
cpu_ldub_code(env1, addr);
}
pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
- mr = iotlb_to_region(pd);
+ mr = iotlb_to_region(cpu->as, pd);
if (memory_region_is_unassigned(mr)) {
- CPUState *cpu = ENV_GET_CPU(env1);
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->do_unassigned_access) {
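The cpu-exec.c, cpus.c and cputlb.c hunks above all follow one pattern: derive the owning CPUState from the CPU env pointer and pass its per-CPU address space (cpu->as) to APIs that previously used the global address_space_memory. A minimal sketch of that pattern, using only the ldl_phys() signature and ENV_GET_CPU() macro visible in the hunks (the helper name is illustrative, not part of the commit):

static uint32_t load_phys_word(CPUArchState *env, hwaddr addr)
{
    CPUState *cpu = ENV_GET_CPU(env);   /* resolve the owning CPU */
    return ldl_phys(cpu->as, addr);     /* address space is now explicit */
}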
diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
index ce1d620842..f3513fa124 100644
--- a/default-configs/arm-softmmu.mak
+++ b/default-configs/arm-softmmu.mak
@@ -27,6 +27,7 @@ CONFIG_SSI_SD=y
CONFIG_SSI_M25P80=y
CONFIG_LAN9118=y
CONFIG_SMC91C111=y
+CONFIG_ALLWINNER_EMAC=y
CONFIG_DS1338=y
CONFIG_PFLASH_CFI01=y
CONFIG_PFLASH_CFI02=y
diff --git a/disas.c b/disas.c
index 0203ef2ef2..79e694483c 100644
--- a/disas.c
+++ b/disas.c
@@ -190,7 +190,7 @@ static int print_insn_od_target(bfd_vma pc, disassemble_info *info)
/* Disassemble this for me please... (debugging). 'flags' has the following
values:
i386 - 1 means 16 bit code, 2 means 64 bit code
- arm - bit 0 = thumb, bit 1 = reverse endian
+ arm - bit 0 = thumb, bit 1 = reverse endian, bit 2 = A64
ppc - nonzero means little endian
other targets - unused
*/
@@ -225,7 +225,15 @@ void target_disas(FILE *out, CPUArchState *env, target_ulong code,
}
print_insn = print_insn_i386;
#elif defined(TARGET_ARM)
- if (flags & 1) {
+ if (flags & 4) {
+ /* We might not be compiled with the A64 disassembler
+ * because it needs a C++ compiler; in that case we will
+ * fall through to the default print_insn_od case.
+ */
+#if defined(CONFIG_ARM_A64_DIS)
+ print_insn = print_insn_arm_a64;
+#endif
+ } else if (flags & 1) {
print_insn = print_insn_thumb1;
} else {
print_insn = print_insn_arm;
@@ -356,6 +364,8 @@ void disas(FILE *out, void *code, unsigned long size)
#elif defined(_ARCH_PPC)
s.info.disassembler_options = (char *)"any";
print_insn = print_insn_ppc;
+#elif defined(__aarch64__) && defined(CONFIG_ARM_A64_DIS)
+ print_insn = print_insn_arm_a64;
#elif defined(__alpha__)
print_insn = print_insn_alpha;
#elif defined(__sparc__)
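With the flag bits documented above (bit 0 = thumb, bit 1 = reverse endian, bit 2 = A64), a caller selects the A64 disassembler by setting bit 2. A sketch against the target_disas() prototype shown in the hunk (the size and flags arguments at the tail are assumed from the surrounding QEMU code, not shown here):

/* Disassemble one 4-byte instruction at pc as A64; if QEMU was built
 * without a C++ compiler this falls back to raw-data output. */
target_disas(out, env, pc, 4, 1 << 2);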
diff --git a/disas/Makefile.objs b/disas/Makefile.objs
index 3b1e77ace5..41c237424a 100644
--- a/disas/Makefile.objs
+++ b/disas/Makefile.objs
@@ -1,5 +1,10 @@
+
common-obj-$(CONFIG_ALPHA_DIS) += alpha.o
common-obj-$(CONFIG_ARM_DIS) += arm.o
+common-obj-$(CONFIG_ARM_A64_DIS) += arm-a64.o
+common-obj-$(CONFIG_ARM_A64_DIS) += libvixl/
+libvixldir = $(SRC_PATH)/disas/libvixl
+$(obj)/arm-a64.o: QEMU_CFLAGS += -I$(libvixldir)
common-obj-$(CONFIG_CRIS_DIS) += cris.o
common-obj-$(CONFIG_HPPA_DIS) += hppa.o
common-obj-$(CONFIG_I386_DIS) += i386.o
diff --git a/disas/arm-a64.cc b/disas/arm-a64.cc
new file mode 100644
index 0000000000..162be0c420
--- /dev/null
+++ b/disas/arm-a64.cc
@@ -0,0 +1,87 @@
+/*
+ * ARM A64 disassembly output wrapper to libvixl
+ * Copyright (c) 2013 Linaro Limited
+ * Written by Claudio Fontana
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "a64/disasm-a64.h"
+
+extern "C" {
+#include "disas/bfd.h"
+}
+
+using namespace vixl;
+
+static Decoder *vixl_decoder = NULL;
+static Disassembler *vixl_disasm = NULL;
+
+/* We don't use libvixl's PrintDisassembler because its output
+ * is a little unhelpful (trailing newlines, for example).
+ * Instead we use our own very similar variant so we have
+ * control over the format.
+ */
+class QEMUDisassembler : public Disassembler {
+public:
+ explicit QEMUDisassembler(FILE *stream) : stream_(stream) { }
+ ~QEMUDisassembler() { }
+
+protected:
+ void ProcessOutput(Instruction *instr) {
+ fprintf(stream_, "%08" PRIx32 " %s",
+ instr->InstructionBits(), GetOutput());
+ }
+
+private:
+ FILE *stream_;
+};
+
+static int vixl_is_initialized(void)
+{
+ return vixl_decoder != NULL;
+}
+
+static void vixl_init(FILE *f) {
+ vixl_decoder = new Decoder();
+ vixl_disasm = new QEMUDisassembler(f);
+ vixl_decoder->AppendVisitor(vixl_disasm);
+}
+
+#define INSN_SIZE 4
+
+/* Disassemble ARM A64 instruction. This is our only entry
+ * point from QEMU's C code.
+ */
+int print_insn_arm_a64(uint64_t addr, disassemble_info *info)
+{
+ uint8_t bytes[INSN_SIZE];
+ uint32_t instr;
+ int status;
+
+ status = info->read_memory_func(addr, bytes, INSN_SIZE, info);
+ if (status != 0) {
+ info->memory_error_func(status, addr, info);
+ return -1;
+ }
+
+ if (!vixl_is_initialized()) {
+ vixl_init(info->stream);
+ }
+
+ instr = bytes[0] | bytes[1] << 8 | bytes[2] << 16 | bytes[3] << 24;
+ vixl_decoder->Decode(reinterpret_cast<Instruction*>(&instr));
+
+ return INSN_SIZE;
+}
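For reference, print_insn_arm_a64() can be driven standalone through the bfd-style disassemble_info contract it relies on above (read_memory_func, memory_error_func, stream). A sketch, assuming the stock buffer/buffer_vma fields from include/disas/bfd.h; QEMU's own callers wire these up in disas.c rather than by hand:

static int read_buf(bfd_vma addr, bfd_byte *dst, int len,
                    struct disassemble_info *info)
{
    /* Serve reads from a host buffer that starts at buffer_vma. */
    memcpy(dst, info->buffer + (addr - info->buffer_vma), len);
    return 0;
}

static void mem_err(int status, bfd_vma addr, struct disassemble_info *info)
{
    fprintf(info->stream, "read error %d at 0x%" PRIx64 "\n",
            status, (uint64_t)addr);
}

static void disas_one_a64(FILE *out, uint64_t pc, uint32_t insn)
{
    struct disassemble_info info = {};
    info.buffer = (bfd_byte *)&insn;
    info.buffer_vma = pc;
    info.buffer_length = sizeof(insn);
    info.read_memory_func = read_buf;
    info.memory_error_func = mem_err;
    info.stream = out;
    print_insn_arm_a64(pc, &info);   /* prints "xxxxxxxx  mnemonic ..." */
}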
diff --git a/disas/libvixl/LICENCE b/disas/libvixl/LICENCE
new file mode 100644
index 0000000000..b7e160a3f5
--- /dev/null
+++ b/disas/libvixl/LICENCE
@@ -0,0 +1,30 @@
+LICENCE
+=======
+
+The software in this repository is covered by the following licence.
+
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/disas/libvixl/Makefile.objs b/disas/libvixl/Makefile.objs
new file mode 100644
index 0000000000..0adb3ced7b
--- /dev/null
+++ b/disas/libvixl/Makefile.objs
@@ -0,0 +1,8 @@
+libvixl_OBJS = utils.o \
+ a64/instructions-a64.o \
+ a64/decoder-a64.o \
+ a64/disasm-a64.o
+
+$(addprefix $(obj)/,$(libvixl_OBJS)): QEMU_CFLAGS += -I$(SRC_PATH)/disas/libvixl
+
+common-obj-$(CONFIG_ARM_A64_DIS) += $(libvixl_OBJS)
diff --git a/disas/libvixl/README b/disas/libvixl/README
new file mode 100644
index 0000000000..96814a5dc1
--- /dev/null
+++ b/disas/libvixl/README
@@ -0,0 +1,12 @@
+
+The code in this directory is a subset of libvixl:
+ https://github.com/armvixl/vixl
+(specifically, it is the set of files needed for disassembly only,
+taken from libvixl 1.1).
+Bugfixes should preferably be sent upstream initially.
+
+The disassembler does not currently support the entire A64 instruction
+set. Notably:
+ * No Advanced SIMD support.
+ * Limited support for system instructions.
+ * A few miscellaneous integer and floating point instructions are missing.
diff --git a/disas/libvixl/a64/assembler-a64.h b/disas/libvixl/a64/assembler-a64.h
new file mode 100644
index 0000000000..93b3011868
--- /dev/null
+++ b/disas/libvixl/a64/assembler-a64.h
@@ -0,0 +1,1784 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_ASSEMBLER_A64_H_
+#define VIXL_A64_ASSEMBLER_A64_H_
+
+#include <list>
+
+#include "globals.h"
+#include "utils.h"
+#include "a64/instructions-a64.h"
+
+namespace vixl {
+
+typedef uint64_t RegList;
+static const int kRegListSizeInBits = sizeof(RegList) * 8;
+
+// Registers.
+
+// Some CPURegister methods can return Register and FPRegister types, so we
+// need to declare them in advance.
+class Register;
+class FPRegister;
+
+
+class CPURegister {
+ public:
+ enum RegisterType {
+ // The kInvalid value is used to detect uninitialized static instances,
+ // which are always zero-initialized before any constructors are called.
+ kInvalid = 0,
+ kRegister,
+ kFPRegister,
+ kNoRegister
+ };
+
+ CPURegister() : code_(0), size_(0), type_(kNoRegister) {
+ ASSERT(!IsValid());
+ ASSERT(IsNone());
+ }
+
+ CPURegister(unsigned code, unsigned size, RegisterType type)
+ : code_(code), size_(size), type_(type) {
+ ASSERT(IsValidOrNone());
+ }
+
+ unsigned code() const {
+ ASSERT(IsValid());
+ return code_;
+ }
+
+ RegisterType type() const {
+ ASSERT(IsValidOrNone());
+ return type_;
+ }
+
+ RegList Bit() const {
+ ASSERT(code_ < (sizeof(RegList) * 8));
+ return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
+ }
+
+ unsigned size() const {
+ ASSERT(IsValid());
+ return size_;
+ }
+
+ int SizeInBytes() const {
+ ASSERT(IsValid());
+ ASSERT(size() % 8 == 0);
+ return size_ / 8;
+ }
+
+ int SizeInBits() const {
+ ASSERT(IsValid());
+ return size_;
+ }
+
+ bool Is32Bits() const {
+ ASSERT(IsValid());
+ return size_ == 32;
+ }
+
+ bool Is64Bits() const {
+ ASSERT(IsValid());
+ return size_ == 64;
+ }
+
+ bool IsValid() const {
+ if (IsValidRegister() || IsValidFPRegister()) {
+ ASSERT(!IsNone());
+ return true;
+ } else {
+ ASSERT(IsNone());
+ return false;
+ }
+ }
+
+ bool IsValidRegister() const {
+ return IsRegister() &&
+ ((size_ == kWRegSize) || (size_ == kXRegSize)) &&
+ ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
+ }
+
+ bool IsValidFPRegister() const {
+ return IsFPRegister() &&
+ ((size_ == kSRegSize) || (size_ == kDRegSize)) &&
+ (code_ < kNumberOfFPRegisters);
+ }
+
+ bool IsNone() const {
+ // kNoRegister types should always have size 0 and code 0.
+ ASSERT((type_ != kNoRegister) || (code_ == 0));
+ ASSERT((type_ != kNoRegister) || (size_ == 0));
+
+ return type_ == kNoRegister;
+ }
+
+ bool Is(const CPURegister& other) const {
+ ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return (code_ == other.code_) && (size_ == other.size_) &&
+ (type_ == other.type_);
+ }
+
+ inline bool IsZero() const {
+ ASSERT(IsValid());
+ return IsRegister() && (code_ == kZeroRegCode);
+ }
+
+ inline bool IsSP() const {
+ ASSERT(IsValid());
+ return IsRegister() && (code_ == kSPRegInternalCode);
+ }
+
+ inline bool IsRegister() const {
+ return type_ == kRegister;
+ }
+
+ inline bool IsFPRegister() const {
+ return type_ == kFPRegister;
+ }
+
+ const Register& W() const;
+ const Register& X() const;
+ const FPRegister& S() const;
+ const FPRegister& D() const;
+
+ inline bool IsSameSizeAndType(const CPURegister& other) const {
+ return (size_ == other.size_) && (type_ == other.type_);
+ }
+
+ protected:
+ unsigned code_;
+ unsigned size_;
+ RegisterType type_;
+
+ private:
+ bool IsValidOrNone() const {
+ return IsValid() || IsNone();
+ }
+};
+
+
+class Register : public CPURegister {
+ public:
+ explicit Register() : CPURegister() {}
+ inline explicit Register(const CPURegister& other)
+ : CPURegister(other.code(), other.size(), other.type()) {
+ ASSERT(IsValidRegister());
+ }
+ explicit Register(unsigned code, unsigned size)
+ : CPURegister(code, size, kRegister) {}
+
+ bool IsValid() const {
+ ASSERT(IsRegister() || IsNone());
+ return IsValidRegister();
+ }
+
+ static const Register& WRegFromCode(unsigned code);
+ static const Register& XRegFromCode(unsigned code);
+
+ // V8 compatibility.
+ static const int kNumRegisters = kNumberOfRegisters;
+ static const int kNumAllocatableRegisters = kNumberOfRegisters - 1;
+
+ private:
+ static const Register wregisters[];
+ static const Register xregisters[];
+};
+
+
+class FPRegister : public CPURegister {
+ public:
+ inline FPRegister() : CPURegister() {}
+ inline explicit FPRegister(const CPURegister& other)
+ : CPURegister(other.code(), other.size(), other.type()) {
+ ASSERT(IsValidFPRegister());
+ }
+ inline FPRegister(unsigned code, unsigned size)
+ : CPURegister(code, size, kFPRegister) {}
+
+ bool IsValid() const {
+ ASSERT(IsFPRegister() || IsNone());
+ return IsValidFPRegister();
+ }
+
+ static const FPRegister& SRegFromCode(unsigned code);
+ static const FPRegister& DRegFromCode(unsigned code);
+
+ // V8 compatibility.
+ static const int kNumRegisters = kNumberOfFPRegisters;
+ static const int kNumAllocatableRegisters = kNumberOfFPRegisters - 1;
+
+ private:
+ static const FPRegister sregisters[];
+ static const FPRegister dregisters[];
+};
+
+
+// No*Reg is used to indicate an unused argument, or an error case. Note that
+// these all compare equal (using the Is() method). The Register and FPRegister
+// variants are provided for convenience.
+const Register NoReg;
+const FPRegister NoFPReg;
+const CPURegister NoCPUReg;
+
+
+#define DEFINE_REGISTERS(N) \
+const Register w##N(N, kWRegSize); \
+const Register x##N(N, kXRegSize);
+REGISTER_CODE_LIST(DEFINE_REGISTERS)
+#undef DEFINE_REGISTERS
+const Register wsp(kSPRegInternalCode, kWRegSize);
+const Register sp(kSPRegInternalCode, kXRegSize);
+
+
+#define DEFINE_FPREGISTERS(N) \
+const FPRegister s##N(N, kSRegSize); \
+const FPRegister d##N(N, kDRegSize);
+REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
+#undef DEFINE_FPREGISTERS
+
+
+// Registers aliases.
+const Register ip0 = x16;
+const Register ip1 = x17;
+const Register lr = x30;
+const Register xzr = x31;
+const Register wzr = w31;
+
+
+// AreAliased returns true if any of the named registers overlap. Arguments
+// set to NoReg are ignored. The system stack pointer may be specified.
+bool AreAliased(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg,
+ const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg,
+ const CPURegister& reg8 = NoReg);
+
+
+// AreSameSizeAndType returns true if all of the specified registers have the
+// same size, and are of the same type. The system stack pointer may be
+// specified. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreSameSizeAndType(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg,
+ const CPURegister& reg5 = NoCPUReg,
+ const CPURegister& reg6 = NoCPUReg,
+ const CPURegister& reg7 = NoCPUReg,
+ const CPURegister& reg8 = NoCPUReg);
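// Illustration (not part of this patch): given the semantics described
// above and the register constants defined earlier in this header, these
// helpers behave like so:
//
//   ASSERT(!AreAliased(x0, x1, x2));       // distinct register codes
//   ASSERT(AreAliased(x0, w0));            // same code: w0 is a view of x0
//   ASSERT(AreSameSizeAndType(x0, x1));    // both 64-bit kRegister
//   ASSERT(!AreSameSizeAndType(x0, w1));   // 64-bit vs 32-bit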
+
+
+// Lists of registers.
+class CPURegList {
+ public:
+ inline explicit CPURegList(CPURegister reg1,
+ CPURegister reg2 = NoCPUReg,
+ CPURegister reg3 = NoCPUReg,
+ CPURegister reg4 = NoCPUReg)
+ : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
+ size_(reg1.size()), type_(reg1.type()) {
+ ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ ASSERT(IsValid());
+ }
+
+ inline CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ : list_(list), size_(size), type_(type) {
+ ASSERT(IsValid());
+ }
+
+ inline CPURegList(CPURegister::RegisterType type, unsigned size,
+ unsigned first_reg, unsigned last_reg)
+ : size_(size), type_(type) {
+ ASSERT(((type == CPURegister::kRegister) &&
+ (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kFPRegister) &&
+ (last_reg < kNumberOfFPRegisters)));
+ ASSERT(last_reg >= first_reg);
+ list_ = (1UL << (last_reg + 1)) - 1;
+ list_ &= ~((1UL << first_reg) - 1);
+ ASSERT(IsValid());
+ }
+
+ inline CPURegister::RegisterType type() const {
+ ASSERT(IsValid());
+ return type_;
+ }
+
+ // Combine another CPURegList into this one. Registers that already exist in
+ // this list are left unchanged. The type and size of the registers in the
+ // 'other' list must match those in this list.
+ void Combine(const CPURegList& other) {
+ ASSERT(IsValid());
+ ASSERT(other.type() == type_);
+ ASSERT(other.RegisterSizeInBits() == size_);
+ list_ |= other.list();
+ }
+
+ // Remove every register in the other CPURegList from this one. Registers that
+ // do not exist in this list are ignored. The type and size of the registers
+ // in the 'other' list must match those in this list.
+ void Remove(const CPURegList& other) {
+ ASSERT(IsValid());
+ ASSERT(other.type() == type_);
+ ASSERT(other.RegisterSizeInBits() == size_);
+ list_ &= ~other.list();
+ }
+
+ // Variants of Combine and Remove which take a single register.
+ inline void Combine(const CPURegister& other) {
+ ASSERT(other.type() == type_);
+ ASSERT(other.size() == size_);
+ Combine(other.code());
+ }
+
+ inline void Remove(const CPURegister& other) {
+ ASSERT(other.type() == type_);
+ ASSERT(other.size() == size_);
+ Remove(other.code());
+ }
+
+ // Variants of Combine and Remove which take a single register by its code;
+ // the type and size of the register is inferred from this list.
+ inline void Combine(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister(code, size_, type_).IsValid());
+ list_ |= (1UL << code);
+ }
+
+ inline void Remove(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister(code, size_, type_).IsValid());
+ list_ &= ~(1UL << code);
+ }
+
+ inline RegList list() const {
+ ASSERT(IsValid());
+ return list_;
+ }
+
+ // Remove all callee-saved registers from the list. This can be useful when
+ // preparing registers for an AAPCS64 function call, for example.
+ void RemoveCalleeSaved();
+
+ CPURegister PopLowestIndex();
+ CPURegister PopHighestIndex();
+
+ // AAPCS64 callee-saved registers.
+ static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
+ static CPURegList GetCalleeSavedFP(unsigned size = kDRegSize);
+
+ // AAPCS64 caller-saved registers. Note that this includes lr.
+ static CPURegList GetCallerSaved(unsigned size = kXRegSize);
+ static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
+
+ inline bool IsEmpty() const {
+ ASSERT(IsValid());
+ return list_ == 0;
+ }
+
+ inline bool IncludesAliasOf(const CPURegister& other) const {
+ ASSERT(IsValid());
+ return (type_ == other.type()) && (other.Bit() & list_);
+ }
+
+ inline int Count() const {
+ ASSERT(IsValid());
+ return CountSetBits(list_, kRegListSizeInBits);
+ }
+
+ inline unsigned RegisterSizeInBits() const {
+ ASSERT(IsValid());
+ return size_;
+ }
+
+ inline unsigned RegisterSizeInBytes() const {
+ int size_in_bits = RegisterSizeInBits();
+ ASSERT((size_in_bits % 8) == 0);
+ return size_in_bits / 8;
+ }
+
+ private:
+ RegList list_;
+ unsigned size_;
+ CPURegister::RegisterType type_;
+
+ bool IsValid() const;
+};
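// Illustration (not part of this patch): building a register list from a
// range and editing it with the Combine()/Remove() overloads above.
//
//   CPURegList saved(CPURegister::kRegister, kXRegSize, 19, 29);  // x19-x29
//   saved.Combine(lr);          // add x30 (alias defined above)
//   saved.Remove(x27);
//   int live = saved.Count();   // now 11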
+
+
+// AAPCS64 callee-saved registers.
+extern const CPURegList kCalleeSaved;
+extern const CPURegList kCalleeSavedFP;
+
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+extern const CPURegList kCallerSaved;
+extern const CPURegList kCallerSavedFP;
+
+
+// Operand.
+class Operand {
+ public:
+ // #<immediate>
+ // where <immediate> is int64_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ Operand(int64_t immediate); // NOLINT(runtime/explicit)
+
+ // rm, {<shift> #<shift_amount>}
+ // where <shift> is one of {LSL, LSR, ASR, ROR}.
+ // <shift_amount> is uint6_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ Operand(Register reg,
+ Shift shift = LSL,
+ unsigned shift_amount = 0); // NOLINT(runtime/explicit)
+
+ // rm, {<extend> {#<shift_amount>}}
+ // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
+ // <shift_amount> is uint2_t.
+ explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
+
+ bool IsImmediate() const;
+ bool IsShiftedRegister() const;
+ bool IsExtendedRegister() const;
+
+ // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
+ // which helps in the encoding of instructions that use the stack pointer.
+ Operand ToExtendedRegister() const;
+
+ int64_t immediate() const {
+ ASSERT(IsImmediate());
+ return immediate_;
+ }
+
+ Register reg() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return reg_;
+ }
+
+ Shift shift() const {
+ ASSERT(IsShiftedRegister());
+ return shift_;
+ }
+
+ Extend extend() const {
+ ASSERT(IsExtendedRegister());
+ return extend_;
+ }
+
+ unsigned shift_amount() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return shift_amount_;
+ }
+
+ private:
+ int64_t immediate_;
+ Register reg_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
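// Illustration (not part of this patch): the three operand forms
// accepted by the constructors above.
//
//   Operand imm(42);               // #42
//   Operand shifted(x1, LSL, 4);   // x1, LSL #4
//   Operand extended(w2, UXTB);    // w2, UXTB
//
// IsImmediate(), IsShiftedRegister() and IsExtendedRegister() report
// which form a given Operand holds.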
+
+
+// MemOperand represents the addressing mode of a load or store instruction.
+class MemOperand {
+ public:
+ explicit MemOperand(Register base,
+ ptrdiff_t offset = 0,
+ AddrMode addrmode = Offset);
+ explicit MemOperand(Register base,
+ Register regoffset,
+ Shift shift = LSL,
+ unsigned shift_amount = 0);
+ explicit MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount = 0);
+ explicit MemOperand(Register base,
+ const Operand& offset,
+ AddrMode addrmode = Offset);
+
+ const Register& base() const { return base_; }
+ const Register& regoffset() const { return regoffset_; }
+ ptrdiff_t offset() const { return offset_; }
+ AddrMode addrmode() const { return addrmode_; }
+ Shift shift() const { return shift_; }
+ Extend extend() const { return extend_; }
+ unsigned shift_amount() const { return shift_amount_; }
+ bool IsImmediateOffset() const;
+ bool IsRegisterOffset() const;
+ bool IsPreIndex() const;
+ bool IsPostIndex() const;
+
+ private:
+ Register base_;
+ Register regoffset_;
+ ptrdiff_t offset_;
+ AddrMode addrmode_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
+
+
+class Label {
+ public:
+ Label() : is_bound_(false), link_(NULL), target_(NULL) {}
+ ~Label() {
+ // If the label has been linked to, it needs to be bound to a target.
+ ASSERT(!IsLinked() || IsBound());
+ }
+
+ inline Instruction* link() const { return link_; }
+ inline Instruction* target() const { return target_; }
+
+ inline bool IsBound() const { return is_bound_; }
+ inline bool IsLinked() const { return link_ != NULL; }
+
+ inline void set_link(Instruction* new_link) { link_ = new_link; }
+
+ static const int kEndOfChain = 0;
+
+ private:
+  // Indicates if the label has been bound, i.e. its location is fixed.
+  bool is_bound_;
+  // Branch instructions branching to this label form a chained list, with
+  // their offset indicating where the next instruction is located.
+  // link_ points to the latest branch instruction generated branching to
+  // this label.
+ // If link_ is not NULL, the label has been linked to.
+ Instruction* link_;
+ // The label location.
+ Instruction* target_;
+
+ friend class Assembler;
+};
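// Illustration (not part of this patch): the intended Label life cycle,
// using Assembler::bind() and the branch methods declared below ("masm"
// is a hypothetical Assembler instance).
//
//   Label loop;
//   masm.bind(&loop);     // bind the label to the current PC
//   /* ... emit the loop body ... */
//   masm.b(&loop, ne);    // backward branch to an already-bound label
//
// A forward branch links the label instead; bind() later resolves the
// chain, and ~Label() ASSERTs that every linked label was bound.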
+
+
+// TODO: Obtain better values for these, based on real-world data.
+const int kLiteralPoolCheckInterval = 4 * KBytes;
+const int kRecommendedLiteralPoolRange = 2 * kLiteralPoolCheckInterval;
+
+
+// Control whether a branch over the literal pool should also be emitted. This
+// is needed if the literal pool has to be emitted in the middle of the JITted
+// code.
+enum LiteralPoolEmitOption {
+ JumpRequired,
+ NoJumpRequired
+};
+
+
+// Literal pool entry.
+class Literal {
+ public:
+ Literal(Instruction* pc, uint64_t imm, unsigned size)
+ : pc_(pc), value_(imm), size_(size) {}
+
+ private:
+ Instruction* pc_;
+ int64_t value_;
+ unsigned size_;
+
+ friend class Assembler;
+};
+
+
+// Assembler.
+class Assembler {
+ public:
+ Assembler(byte* buffer, unsigned buffer_size);
+
+ // The destructor asserts that one of the following is true:
+ // * The Assembler object has not been used.
+ // * Nothing has been emitted since the last Reset() call.
+ // * Nothing has been emitted since the last FinalizeCode() call.
+ ~Assembler();
+
+ // System functions.
+
+ // Start generating code from the beginning of the buffer, discarding any code
+ // and data that has already been emitted into the buffer.
+ //
+ // In order to avoid any accidental transfer of state, Reset ASSERTs that the
+ // constant pool is not blocked.
+ void Reset();
+
+ // Finalize a code buffer of generated instructions. This function must be
+ // called before executing or copying code from the buffer.
+ void FinalizeCode();
+
+ // Label.
+ // Bind a label to the current PC.
+ void bind(Label* label);
+ int UpdateAndGetByteOffsetTo(Label* label);
+ inline int UpdateAndGetInstructionOffsetTo(Label* label) {
+ ASSERT(Label::kEndOfChain == 0);
+ return UpdateAndGetByteOffsetTo(label) >> kInstructionSizeLog2;
+ }
+
+
+ // Instruction set functions.
+
+ // Branch / Jump instructions.
+ // Branch to register.
+ void br(const Register& xn);
+
+ // Branch with link to register.
+ void blr(const Register& xn);
+
+ // Branch to register with return hint.
+ void ret(const Register& xn = lr);
+
+ // Unconditional branch to label.
+ void b(Label* label);
+
+ // Conditional branch to label.
+ void b(Label* label, Condition cond);
+
+ // Unconditional branch to PC offset.
+ void b(int imm26);
+
+ // Conditional branch to PC offset.
+ void b(int imm19, Condition cond);
+
+ // Branch with link to label.
+ void bl(Label* label);
+
+ // Branch with link to PC offset.
+ void bl(int imm26);
+
+ // Compare and branch to label if zero.
+ void cbz(const Register& rt, Label* label);
+
+ // Compare and branch to PC offset if zero.
+ void cbz(const Register& rt, int imm19);
+
+ // Compare and branch to label if not zero.
+ void cbnz(const Register& rt, Label* label);
+
+ // Compare and branch to PC offset if not zero.
+ void cbnz(const Register& rt, int imm19);
+
+ // Test bit and branch to label if zero.
+ void tbz(const Register& rt, unsigned bit_pos, Label* label);
+
+ // Test bit and branch to PC offset if zero.
+ void tbz(const Register& rt, unsigned bit_pos, int imm14);
+
+ // Test bit and branch to label if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, Label* label);
+
+ // Test bit and branch to PC offset if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, int imm14);
+
+ // Address calculation instructions.
+ // Calculate a PC-relative address. Unlike for branches the offset in adr is
+ // unscaled (i.e. the result can be unaligned).
+
+ // Calculate the address of a label.
+ void adr(const Register& rd, Label* label);
+
+ // Calculate the address of a PC offset.
+ void adr(const Register& rd, int imm21);
+
+ // Data Processing instructions.
+ // Add.
+ void add(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+
+ // Compare negative.
+ void cmn(const Register& rn, const Operand& operand);
+
+ // Subtract.
+ void sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+
+ // Compare.
+ void cmp(const Register& rn, const Operand& operand);
+
+ // Negate.
+ void neg(const Register& rd,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+
+ // Add with carry bit.
+ void adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+
+ // Subtract with carry bit.
+ void sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+
+ // Negate with carry bit.
+ void ngc(const Register& rd,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+
+ // Logical instructions.
+ // Bitwise and (A & B).
+ void and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+
+ // Bit test and set flags.
+ void tst(const Register& rn, const Operand& operand);
+
+ // Bit clear (A & ~B).
+ void bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+
+ // Bitwise or (A | B).
+ void orr(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise nor (A | ~B).
+ void orn(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise eor/xor (A ^ B).
+ void eor(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise enor/xnor (A ^ ~B).
+ void eon(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Logical shift left by variable.
+ void lslv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Logical shift right by variable.
+ void lsrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Arithmetic shift right by variable.
+ void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Rotate right by variable.
+ void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bitfield instructions.
+ // Bitfield move.
+ void bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Signed bitfield move.
+ void sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Unsigned bitfield move.
+ void ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Bfm aliases.
+ // Bitfield insert.
+ inline void bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.size());
+ bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
+
+ // Bitfield extract and insert low.
+ inline void bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.size());
+ bfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Sbfm aliases.
+ // Arithmetic shift right.
+ inline void asr(const Register& rd, const Register& rn, unsigned shift) {
+ ASSERT(shift < rd.size());
+ sbfm(rd, rn, shift, rd.size() - 1);
+ }
+
+ // Signed bitfield insert with zero at right.
+ inline void sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.size());
+ sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
+
+ // Signed bitfield extract.
+ inline void sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.size());
+ sbfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Signed extend byte.
+ inline void sxtb(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 7);
+ }
+
+ // Signed extend halfword.
+ inline void sxth(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 15);
+ }
+
+ // Signed extend word.
+ inline void sxtw(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 31);
+ }
+
+ // Ubfm aliases.
+ // Logical shift left.
+ inline void lsl(const Register& rd, const Register& rn, unsigned shift) {
+ unsigned reg_size = rd.size();
+ ASSERT(shift < reg_size);
+ ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+ }
+
+ // Logical shift right.
+ inline void lsr(const Register& rd, const Register& rn, unsigned shift) {
+ ASSERT(shift < rd.size());
+ ubfm(rd, rn, shift, rd.size() - 1);
+ }
+
+ // Unsigned bitfield insert with zero at right.
+ inline void ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.size());
+ ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
+
+ // Unsigned bitfield extract.
+ inline void ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.size());
+ ubfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Unsigned extend byte.
+ inline void uxtb(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 7);
+ }
+
+ // Unsigned extend halfword.
+ inline void uxth(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 15);
+ }
+
+ // Unsigned extend word.
+ inline void uxtw(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 31);
+ }
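// Illustration (not part of this patch): how the aliases above expand to
// the underlying bitfield-move instruction for a 64-bit destination.
//
//   lsl(x0, x1, 8);      // => ubfm(x0, x1, 56, 55)  ((64-8)%64, 64-8-1)
//   ubfx(x0, x1, 4, 8);  // => ubfm(x0, x1, 4, 11)   (lsb, lsb+width-1)
//   uxth(x0, x1);        // => ubfm(x0, x1, 0, 15)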
+
+ // Extract.
+ void extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+
+ // Conditional select: rd = cond ? rn : rm.
+ void csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select increment: rd = cond ? rn : rm + 1.
+ void csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select inversion: rd = cond ? rn : ~rm.
+ void csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select negation: rd = cond ? rn : -rm.
+ void csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional set: rd = cond ? 1 : 0.
+ void cset(const Register& rd, Condition cond);
+
+ // Conditional set mask: rd = cond ? -1 : 0.
+ void csetm(const Register& rd, Condition cond);
+
+ // Conditional increment: rd = cond ? rn + 1 : rn.
+ void cinc(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional invert: rd = cond ? ~rn : rn.
+ void cinv(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional negate: rd = cond ? -rn : rn.
+ void cneg(const Register& rd, const Register& rn, Condition cond);
+
+ // Rotate right.
+ inline void ror(const Register& rd, const Register& rs, unsigned shift) {
+ extr(rd, rs, rs, shift);
+ }
+
+ // Conditional comparison.
+ // Conditional compare negative.
+ void ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Conditional compare.
+ void ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Multiply.
+ void mul(const Register& rd, const Register& rn, const Register& rm);
+
+ // Negated multiply.
+ void mneg(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed long multiply: 32 x 32 -> 64-bit.
+ void smull(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed multiply high: 64 x 64 -> 64-bit <127:64>.
+ void smulh(const Register& xd, const Register& xn, const Register& xm);
+
+ // Multiply and accumulate.
+ void madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Multiply and subtract.
+ void msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+ void smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+ void umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+ void smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+ void umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed integer divide.
+ void sdiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Unsigned integer divide.
+ void udiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bit reverse.
+ void rbit(const Register& rd, const Register& rn);
+
+ // Reverse bytes in 16-bit half words.
+ void rev16(const Register& rd, const Register& rn);
+
+ // Reverse bytes in 32-bit words.
+ void rev32(const Register& rd, const Register& rn);
+
+ // Reverse bytes.
+ void rev(const Register& rd, const Register& rn);
+
+ // Count leading zeroes.
+ void clz(const Register& rd, const Register& rn);
+
+ // Count leading sign bits.
+ void cls(const Register& rd, const Register& rn);
+
+ // Memory instructions.
+ // Load integer or FP register.
+ void ldr(const CPURegister& rt, const MemOperand& src);
+
+ // Store integer or FP register.
+ void str(const CPURegister& rt, const MemOperand& dst);
+
+ // Load word with sign extension.
+ void ldrsw(const Register& rt, const MemOperand& src);
+
+ // Load byte.
+ void ldrb(const Register& rt, const MemOperand& src);
+
+ // Store byte.
+ void strb(const Register& rt, const MemOperand& dst);
+
+ // Load byte with sign extension.
+ void ldrsb(const Register& rt, const MemOperand& src);
+
+ // Load half-word.
+ void ldrh(const Register& rt, const MemOperand& src);
+
+ // Store half-word.
+ void strh(const Register& rt, const MemOperand& dst);
+
+ // Load half-word with sign extension.
+ void ldrsh(const Register& rt, const MemOperand& src);
+
+ // Load integer or FP register pair.
+ void ldp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair.
+ void stp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load word pair with sign extension.
+ void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Load integer or FP register pair, non-temporal.
+ void ldnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair, non-temporal.
+ void stnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load literal to register.
+ void ldr(const Register& rt, uint64_t imm);
+
+ // Load literal to FP register.
+ void ldr(const FPRegister& ft, double imm);
+
+ // Move instructions. The default shift of -1 indicates that the move
+ // instruction will calculate an appropriate 16-bit immediate and left shift
+ // that is equal to the 64-bit immediate argument. If an explicit left shift
+ // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
+ //
+ // For movk, an explicit shift can be used to indicate which half word should
+  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
+ // half word with zero, whereas movk(x0, 0, 48) will overwrite the
+ // most-significant.
+
+ // Move immediate and keep.
+ void movk(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVK);
+ }
+
+ // Move inverted immediate.
+ void movn(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVN);
+ }
+
+ // Move immediate.
+ void movz(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVZ);
+ }
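// Illustration (not part of this patch): materialising a 64-bit constant
// with explicit shifts, one half word at a time, per the comment above.
//
//   movz(x0, 0x1234, 48);  // x0 = 0x1234000000000000
//   movk(x0, 0x5678, 32);  // x0 = 0x1234567800000000
//   movk(x0, 0x9abc, 16);  // x0 = 0x123456789abc0000
//   movk(x0, 0xdef0, 0);   // x0 = 0x123456789abcdef0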
+
+ // Misc instructions.
+ // Monitor debug-mode breakpoint.
+ void brk(int code);
+
+ // Halting debug-mode breakpoint.
+ void hlt(int code);
+
+ // Move register to register.
+ void mov(const Register& rd, const Register& rn);
+
+ // Move inverted operand to register.
+ void mvn(const Register& rd, const Operand& operand);
+
+ // System instructions.
+ // Move to register from system register.
+ void mrs(const Register& rt, SystemRegister sysreg);
+
+ // Move from register to system register.
+ void msr(SystemRegister sysreg, const Register& rt);
+
+ // System hint.
+ void hint(SystemHint code);
+
+ // Alias for system instructions.
+ // No-op.
+ void nop() {
+ hint(NOP);
+ }
+
+ // FP instructions.
+ // Move immediate to FP register.
+ void fmov(FPRegister fd, double imm);
+
+ // Move FP register to register.
+ void fmov(Register rd, FPRegister fn);
+
+ // Move register to FP register.
+ void fmov(FPRegister fd, Register rn);
+
+ // Move FP register to FP register.
+ void fmov(FPRegister fd, FPRegister fn);
+
+ // FP add.
+ void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP subtract.
+ void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP multiply.
+ void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP multiply and subtract.
+ void fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP divide.
+ void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP maximum.
+ void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP minimum.
+ void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP absolute.
+ void fabs(const FPRegister& fd, const FPRegister& fn);
+
+ // FP negate.
+ void fneg(const FPRegister& fd, const FPRegister& fn);
+
+ // FP square root.
+ void fsqrt(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (nearest with ties to even).
+ void frintn(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (towards zero).
+ void frintz(const FPRegister& fd, const FPRegister& fn);
+
+ // FP compare registers.
+ void fcmp(const FPRegister& fn, const FPRegister& fm);
+
+ // FP compare immediate.
+ void fcmp(const FPRegister& fn, double value);
+
+ // FP conditional compare.
+ void fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP conditional select.
+ void fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond);
+
+ // Common FP Convert function.
+ void FPConvertToInt(const Register& rd,
+ const FPRegister& fn,
+ FPIntegerConvertOp op);
+
+ // FP convert between single and double precision.
+ void fcvt(const FPRegister& fd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards -infinity).
+ void fcvtmu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (round towards -infinity).
+ void fcvtms(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to even).
+ void fcvtnu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (nearest with ties to even).
+ void fcvtns(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards zero).
+ void fcvtzu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (round towards zero).
+ void fcvtzs(const Register& rd, const FPRegister& fn);
+
+ // Convert signed integer or fixed point to FP.
+ void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+ // Convert unsigned integer or fixed point to FP.
+ void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+ // Emit generic instructions.
+ // Emit raw instructions into the instruction stream.
+ inline void dci(Instr raw_inst) { Emit(raw_inst); }
+
+ // Emit 32 bits of data into the instruction stream.
+ inline void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
+
+ // Emit 64 bits of data into the instruction stream.
+ inline void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
+
+ // Copy a string into the instruction stream, including the terminating NULL
+ // character. The instruction pointer (pc_) is then aligned correctly for
+ // subsequent instructions.
+ void EmitStringData(const char * string) {
+ ASSERT(string != NULL);
+
+ size_t len = strlen(string) + 1;
+ EmitData(string, len);
+
+ // Pad with NULL characters until pc_ is aligned.
+ const char pad[] = {'\0', '\0', '\0', '\0'};
+ ASSERT(sizeof(pad) == kInstructionSize);
+ Instruction* next_pc = AlignUp(pc_, kInstructionSize);
+ EmitData(&pad, next_pc - pc_);
+ }
+
+ // Code generation helpers.
+
+ // Register encoding.
+ static Instr Rd(CPURegister rd) {
+ ASSERT(rd.code() != kSPRegInternalCode);
+ return rd.code() << Rd_offset;
+ }
+
+ static Instr Rn(CPURegister rn) {
+ ASSERT(rn.code() != kSPRegInternalCode);
+ return rn.code() << Rn_offset;
+ }
+
+ static Instr Rm(CPURegister rm) {
+ ASSERT(rm.code() != kSPRegInternalCode);
+ return rm.code() << Rm_offset;
+ }
+
+ static Instr Ra(CPURegister ra) {
+ ASSERT(ra.code() != kSPRegInternalCode);
+ return ra.code() << Ra_offset;
+ }
+
+ static Instr Rt(CPURegister rt) {
+ ASSERT(rt.code() != kSPRegInternalCode);
+ return rt.code() << Rt_offset;
+ }
+
+ static Instr Rt2(CPURegister rt2) {
+ ASSERT(rt2.code() != kSPRegInternalCode);
+ return rt2.code() << Rt2_offset;
+ }
+
+ // These encoding functions allow the stack pointer to be encoded, and
+ // disallow the zero register.
+ static Instr RdSP(Register rd) {
+ ASSERT(!rd.IsZero());
+ return (rd.code() & kRegCodeMask) << Rd_offset;
+ }
+
+ static Instr RnSP(Register rn) {
+ ASSERT(!rn.IsZero());
+ return (rn.code() & kRegCodeMask) << Rn_offset;
+ }
+
+ // Flags encoding.
+ static Instr Flags(FlagsUpdate S) {
+ if (S == SetFlags) {
+ return 1 << FlagsUpdate_offset;
+ } else if (S == LeaveFlags) {
+ return 0 << FlagsUpdate_offset;
+ }
+ UNREACHABLE();
+ return 0;
+ }
+
+ static Instr Cond(Condition cond) {
+ return cond << Condition_offset;
+ }
+
+ // PC-relative address encoding.
+ static Instr ImmPCRelAddress(int imm21) {
+ ASSERT(is_int21(imm21));
+ Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
+ Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
+ Instr immlo = imm << ImmPCRelLo_offset;
+ return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
+ }
+
+ // Branch encoding.
+ static Instr ImmUncondBranch(int imm26) {
+ ASSERT(is_int26(imm26));
+ return truncate_to_int26(imm26) << ImmUncondBranch_offset;
+ }
+
+ static Instr ImmCondBranch(int imm19) {
+ ASSERT(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCondBranch_offset;
+ }
+
+ static Instr ImmCmpBranch(int imm19) {
+ ASSERT(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCmpBranch_offset;
+ }
+
+ static Instr ImmTestBranch(int imm14) {
+ ASSERT(is_int14(imm14));
+ return truncate_to_int14(imm14) << ImmTestBranch_offset;
+ }
+
+ static Instr ImmTestBranchBit(unsigned bit_pos) {
+ ASSERT(is_uint6(bit_pos));
+ // Subtract five from the shift offset, as we need bit 5 from bit_pos.
+ unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+ unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
+ b5 &= ImmTestBranchBit5_mask;
+ b40 &= ImmTestBranchBit40_mask;
+ return b5 | b40;
+ }
+
+ // Data Processing encoding.
+ static Instr SF(Register rd) {
+ return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
+ }
+
+ static Instr ImmAddSub(int64_t imm) {
+ ASSERT(IsImmAddSub(imm));
+ if (is_uint12(imm)) { // No shift required.
+ return imm << ImmAddSub_offset;
+ } else {
+ return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+ }
+ }
+
+ static inline Instr ImmS(unsigned imms, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
+ ((reg_size == kWRegSize) && is_uint5(imms)));
+ USE(reg_size);
+ return imms << ImmS_offset;
+ }
+
+ static inline Instr ImmR(unsigned immr, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+ ((reg_size == kWRegSize) && is_uint5(immr)));
+ USE(reg_size);
+ ASSERT(is_uint6(immr));
+ return immr << ImmR_offset;
+ }
+
+ static inline Instr ImmSetBits(unsigned imms, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ ASSERT(is_uint6(imms));
+ ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
+ USE(reg_size);
+ return imms << ImmSetBits_offset;
+ }
+
+ static inline Instr ImmRotate(unsigned immr, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+ ((reg_size == kWRegSize) && is_uint5(immr)));
+ USE(reg_size);
+ return immr << ImmRotate_offset;
+ }
+
+ static inline Instr ImmLLiteral(int imm19) {
+ ASSERT(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmLLiteral_offset;
+ }
+
+ static inline Instr BitN(unsigned bitn, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ ASSERT((reg_size == kXRegSize) || (bitn == 0));
+ USE(reg_size);
+ return bitn << BitN_offset;
+ }
+
+ static Instr ShiftDP(Shift shift) {
+ ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+ return shift << ShiftDP_offset;
+ }
+
+ static Instr ImmDPShift(unsigned amount) {
+ ASSERT(is_uint6(amount));
+ return amount << ImmDPShift_offset;
+ }
+
+ static Instr ExtendMode(Extend extend) {
+ return extend << ExtendMode_offset;
+ }
+
+ static Instr ImmExtendShift(unsigned left_shift) {
+ ASSERT(left_shift <= 4);
+ return left_shift << ImmExtendShift_offset;
+ }
+
+ static Instr ImmCondCmp(unsigned imm) {
+ ASSERT(is_uint5(imm));
+ return imm << ImmCondCmp_offset;
+ }
+
+ static Instr Nzcv(StatusFlags nzcv) {
+ return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
+ }
+
+ // MemOperand offset encoding.
+ static Instr ImmLSUnsigned(int imm12) {
+ ASSERT(is_uint12(imm12));
+ return imm12 << ImmLSUnsigned_offset;
+ }
+
+ static Instr ImmLS(int imm9) {
+ ASSERT(is_int9(imm9));
+ return truncate_to_int9(imm9) << ImmLS_offset;
+ }
+
+ static Instr ImmLSPair(int imm7, LSDataSize size) {
+ ASSERT(((imm7 >> size) << size) == imm7);
+ int scaled_imm7 = imm7 >> size;
+ ASSERT(is_int7(scaled_imm7));
+ return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
+ }
+
+ static Instr ImmShiftLS(unsigned shift_amount) {
+ ASSERT(is_uint1(shift_amount));
+ return shift_amount << ImmShiftLS_offset;
+ }
+
+ static Instr ImmException(int imm16) {
+ ASSERT(is_uint16(imm16));
+ return imm16 << ImmException_offset;
+ }
+
+ static Instr ImmSystemRegister(int imm15) {
+ ASSERT(is_uint15(imm15));
+ return imm15 << ImmSystemRegister_offset;
+ }
+
+ static Instr ImmHint(int imm7) {
+ ASSERT(is_uint7(imm7));
+ return imm7 << ImmHint_offset;
+ }
+
+ static LSDataSize CalcLSDataSize(LoadStoreOp op) {
+ ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
+ return static_cast<LSDataSize>(op >> SizeLS_offset);
+ }
+
+ // Move immediates encoding.
+ static Instr ImmMoveWide(uint64_t imm) {
+ ASSERT(is_uint16(imm));
+ return imm << ImmMoveWide_offset;
+ }
+
+ static Instr ShiftMoveWide(int64_t shift) {
+ ASSERT(is_uint2(shift));
+ return shift << ShiftMoveWide_offset;
+ }
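+
+ // For example, movz x0, #0x1234, lsl #16 uses ImmMoveWide(0x1234) |
+ // ShiftMoveWide(1) in its encoding.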
+
+ // FP Immediates.
+ static Instr ImmFP32(float imm);
+ static Instr ImmFP64(double imm);
+
+ // FP register type.
+ static Instr FPType(FPRegister fd) {
+ return fd.Is64Bits() ? FP64 : FP32;
+ }
+
+ static Instr FPScale(unsigned scale) {
+ ASSERT(is_uint6(scale));
+ return scale << FPScale_offset;
+ }
+
+ // Size of the code generated, in bytes.
+ uint64_t SizeOfCodeGenerated() const {
+ ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
+ return pc_ - buffer_;
+ }
+
+ // Size of the code generated from label to the current position.
+ uint64_t SizeOfCodeGeneratedSince(Label* label) const {
+ ASSERT(label->IsBound());
+ ASSERT((pc_ >= label->target()) && (pc_ < (buffer_ + buffer_size_)));
+ return pc_ - label->target();
+ }
+
+
+ inline void BlockLiteralPool() {
+ literal_pool_monitor_++;
+ }
+
+ inline void ReleaseLiteralPool() {
+ if (--literal_pool_monitor_ == 0) {
+ // Has the literal pool been blocked for too long?
+ ASSERT(literals_.empty() ||
+ (pc_ < (literals_.back()->pc_ + kMaxLoadLiteralRange)));
+ }
+ }
+
+ inline bool IsLiteralPoolBlocked() {
+ return literal_pool_monitor_ != 0;
+ }
+
+ void CheckLiteralPool(LiteralPoolEmitOption option = JumpRequired);
+ void EmitLiteralPool(LiteralPoolEmitOption option = NoJumpRequired);
+ size_t LiteralPoolSize();
+
+ protected:
+ inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const {
+ return reg.Is64Bits() ? xzr : wzr;
+ }
+
+
+ void LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+ static bool IsImmLSUnscaled(ptrdiff_t offset);
+ static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
+
+ void Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+ void LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op);
+ static bool IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r);
+
+ void ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ static bool IsImmConditionalCompare(int64_t immediate);
+
+ void AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Functions for emulating operands not directly supported by the instruction
+ // set.
+ void EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned amount);
+ void EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift);
+
+ void AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+ static bool IsImmAddSub(int64_t immediate);
+
+ // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
+ // registers. Only simple loads are supported; sign- and zero-extension (such
+ // as in LDPSW_x or LDRB_w) are not supported.
+ static LoadStoreOp LoadOpFor(const CPURegister& rt);
+ static LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static LoadStoreOp StoreOpFor(const CPURegister& rt);
+ static LoadStorePairOp StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+
+
+ private:
+ // Instruction helpers.
+ void MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op);
+ void DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op);
+ void LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op);
+ void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op);
+ void ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op);
+ void DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op);
+ void DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op);
+ void FPDataProcessing1Source(const FPRegister& fd,
+ const FPRegister& fn,
+ FPDataProcessing1SourceOp op);
+ void FPDataProcessing2Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ FPDataProcessing2SourceOp op);
+ void FPDataProcessing3Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa,
+ FPDataProcessing3SourceOp op);
+
+ // Encoding helpers.
+ static bool IsImmFP32(float imm);
+ static bool IsImmFP64(double imm);
+
+ void RecordLiteral(int64_t imm, unsigned size);
+
+ // Emit the instruction at pc_.
+ void Emit(Instr instruction) {
+ ASSERT(sizeof(*pc_) == 1);
+ ASSERT(sizeof(instruction) == kInstructionSize);
+ ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
+
+#ifdef DEBUG
+ finalized_ = false;
+#endif
+
+ memcpy(pc_, &instruction, sizeof(instruction));
+ pc_ += sizeof(instruction);
+ CheckBufferSpace();
+ }
+
+ // Emit data inline in the instruction stream.
+ void EmitData(void const * data, unsigned size) {
+ ASSERT(sizeof(*pc_) == 1);
+ ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
+
+#ifdef DEBUG
+ finalized_ = false;
+#endif
+
+ // TODO: Record this 'instruction' as data, so that it can be disassembled
+ // correctly.
+ memcpy(pc_, data, size);
+ pc_ += size;
+ CheckBufferSpace();
+ }
+
+ inline void CheckBufferSpace() {
+ ASSERT(pc_ < (buffer_ + buffer_size_));
+ if (pc_ > next_literal_pool_check_) {
+ CheckLiteralPool();
+ }
+ }
+
+ // The buffer into which code and relocation info are generated.
+ Instruction* buffer_;
+ // Buffer size, in bytes.
+ unsigned buffer_size_;
+ Instruction* pc_;
+ std::list<Literal*> literals_;
+ Instruction* next_literal_pool_check_;
+ unsigned literal_pool_monitor_;
+
+ friend class BlockLiteralPoolScope;
+
+#ifdef DEBUG
+ bool finalized_;
+#endif
+};
+
+class BlockLiteralPoolScope {
+ public:
+ explicit BlockLiteralPoolScope(Assembler* assm) : assm_(assm) {
+ assm_->BlockLiteralPool();
+ }
+
+ ~BlockLiteralPoolScope() {
+ assm_->ReleaseLiteralPool();
+ }
+
+ private:
+ Assembler* assm_;
+};
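+
+// A sketch of typical use; `masm` stands in for an Assembler constructed
+// over a code buffer and is not defined in this header:
+//
+// {
+// BlockLiteralPoolScope scope(&masm);
+// // Instructions emitted here are kept contiguous; no literal pool
+// // can be dumped in their midst.
+// }
+// // Literal pool emission may resume from this point.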
+} // namespace vixl
+
+#endif // VIXL_A64_ASSEMBLER_A64_H_
diff --git a/disas/libvixl/a64/constants-a64.h b/disas/libvixl/a64/constants-a64.h
new file mode 100644
index 0000000000..2e0336dd0f
--- /dev/null
+++ b/disas/libvixl/a64/constants-a64.h
@@ -0,0 +1,1104 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_CONSTANTS_A64_H_
+#define VIXL_A64_CONSTANTS_A64_H_
+
+namespace vixl {
+
+const unsigned kNumberOfRegisters = 32;
+const unsigned kNumberOfFPRegisters = 32;
+// Callee saved registers are x21-x30(lr).
+const int kNumberOfCalleeSavedRegisters = 10;
+const int kFirstCalleeSavedRegisterIndex = 21;
+// Callee saved FP registers are d8-d15.
+const int kNumberOfCalleeSavedFPRegisters = 8;
+const int kFirstCalleeSavedFPRegisterIndex = 8;
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define INSTRUCTION_FIELDS_LIST(V_) \
+/* Register fields */ \
+V_(Rd, 4, 0, Bits) /* Destination register. */ \
+V_(Rn, 9, 5, Bits) /* First source register. */ \
+V_(Rm, 20, 16, Bits) /* Second source register. */ \
+V_(Ra, 14, 10, Bits) /* Third source register. */ \
+V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
+V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
+ /* store second source. */ \
+V_(PrefetchMode, 4, 0, Bits) \
+ \
+/* Common bits */ \
+V_(SixtyFourBits, 31, 31, Bits) \
+V_(FlagsUpdate, 29, 29, Bits) \
+ \
+/* PC relative addressing */ \
+V_(ImmPCRelHi, 23, 5, SignedBits) \
+V_(ImmPCRelLo, 30, 29, Bits) \
+ \
+/* Add/subtract/logical shift register */ \
+V_(ShiftDP, 23, 22, Bits) \
+V_(ImmDPShift, 15, 10, Bits) \
+ \
+/* Add/subtract immediate */ \
+V_(ImmAddSub, 21, 10, Bits) \
+V_(ShiftAddSub, 23, 22, Bits) \
+ \
+/* Add/subtract extend */ \
+V_(ImmExtendShift, 12, 10, Bits) \
+V_(ExtendMode, 15, 13, Bits) \
+ \
+/* Move wide */ \
+V_(ImmMoveWide, 20, 5, Bits) \
+V_(ShiftMoveWide, 22, 21, Bits) \
+ \
+/* Logical immediate, bitfield and extract */ \
+V_(BitN, 22, 22, Bits) \
+V_(ImmRotate, 21, 16, Bits) \
+V_(ImmSetBits, 15, 10, Bits) \
+V_(ImmR, 21, 16, Bits) \
+V_(ImmS, 15, 10, Bits) \
+ \
+/* Test and branch immediate */ \
+V_(ImmTestBranch, 18, 5, SignedBits) \
+V_(ImmTestBranchBit40, 23, 19, Bits) \
+V_(ImmTestBranchBit5, 31, 31, Bits) \
+ \
+/* Conditionals */ \
+V_(Condition, 15, 12, Bits) \
+V_(ConditionBranch, 3, 0, Bits) \
+V_(Nzcv, 3, 0, Bits) \
+V_(ImmCondCmp, 20, 16, Bits) \
+V_(ImmCondBranch, 23, 5, SignedBits) \
+ \
+/* Floating point */ \
+V_(FPType, 23, 22, Bits) \
+V_(ImmFP, 20, 13, Bits) \
+V_(FPScale, 15, 10, Bits) \
+ \
+/* Load Store */ \
+V_(ImmLS, 20, 12, SignedBits) \
+V_(ImmLSUnsigned, 21, 10, Bits) \
+V_(ImmLSPair, 21, 15, SignedBits) \
+V_(SizeLS, 31, 30, Bits) \
+V_(ImmShiftLS, 12, 12, Bits) \
+ \
+/* Other immediates */ \
+V_(ImmUncondBranch, 25, 0, SignedBits) \
+V_(ImmCmpBranch, 23, 5, SignedBits) \
+V_(ImmLLiteral, 23, 5, SignedBits) \
+V_(ImmException, 20, 5, Bits) \
+V_(ImmHint, 11, 5, Bits) \
+ \
+/* System (MRS, MSR) */ \
+V_(ImmSystemRegister, 19, 5, Bits) \
+V_(SysO0, 19, 19, Bits) \
+V_(SysOp1, 18, 16, Bits) \
+V_(SysOp2, 7, 5, Bits) \
+V_(CRn, 15, 12, Bits) \
+V_(CRm, 11, 8, Bits) \
+
+
+#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
+/* NZCV */ \
+V_(Flags, 31, 28, Bits) \
+V_(N, 31, 31, Bits) \
+V_(Z, 30, 30, Bits) \
+V_(C, 29, 29, Bits) \
+V_(V, 28, 28, Bits) \
+M_(NZCV, Flags_mask) \
+ \
+/* FPCR */ \
+V_(AHP, 26, 26, Bits) \
+V_(DN, 25, 25, Bits) \
+V_(FZ, 24, 24, Bits) \
+V_(RMode, 23, 22, Bits) \
+M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
+
+
+// Field offsets.
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X) \
+const int Name##_offset = LowBit; \
+const int Name##_width = HighBit - LowBit + 1; \
+const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
+#define NOTHING(A, B)
+INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS)
+SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
+#undef NOTHING
+#undef DECLARE_FIELDS_OFFSETS
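+
+// As an illustration, the Rd entry above expands to:
+// const int Rd_offset = 0;
+// const int Rd_width = 5;
+// const uint32_t Rd_mask = 0x0000001F;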
+
+// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed
+// from ImmPCRelLo and ImmPCRelHi.
+const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
+
+// Condition codes.
+enum Condition {
+ eq = 0,
+ ne = 1,
+ hs = 2,
+ lo = 3,
+ mi = 4,
+ pl = 5,
+ vs = 6,
+ vc = 7,
+ hi = 8,
+ ls = 9,
+ ge = 10,
+ lt = 11,
+ gt = 12,
+ le = 13,
+ al = 14,
+ nv = 15 // Behaves as always/al.
+};
+
+inline Condition InvertCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no "always false" condition.
+ ASSERT((cond != al) && (cond != nv));
+ return static_cast<Condition>(cond ^ 1);
+}
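+
+// For example, InvertCondition(eq) yields ne (0 ^ 1), and
+// InvertCondition(lt) yields ge (0b1011 ^ 1 == 0b1010).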
+
+enum FlagsUpdate {
+ SetFlags = 1,
+ LeaveFlags = 0
+};
+
+enum StatusFlags {
+ NoFlag = 0,
+
+ // Derive the flag combinations from the system register bit descriptions.
+ NFlag = N_mask,
+ ZFlag = Z_mask,
+ CFlag = C_mask,
+ VFlag = V_mask,
+ NZFlag = NFlag | ZFlag,
+ NCFlag = NFlag | CFlag,
+ NVFlag = NFlag | VFlag,
+ ZCFlag = ZFlag | CFlag,
+ ZVFlag = ZFlag | VFlag,
+ CVFlag = CFlag | VFlag,
+ NZCFlag = NFlag | ZFlag | CFlag,
+ NZVFlag = NFlag | ZFlag | VFlag,
+ NCVFlag = NFlag | CFlag | VFlag,
+ ZCVFlag = ZFlag | CFlag | VFlag,
+ NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+ // Floating-point comparison results.
+ FPEqualFlag = ZCFlag,
+ FPLessThanFlag = NFlag,
+ FPGreaterThanFlag = CFlag,
+ FPUnorderedFlag = CVFlag
+};
+
+enum Shift {
+ NO_SHIFT = -1,
+ LSL = 0x0,
+ LSR = 0x1,
+ ASR = 0x2,
+ ROR = 0x3
+};
+
+enum Extend {
+ NO_EXTEND = -1,
+ UXTB = 0,
+ UXTH = 1,
+ UXTW = 2,
+ UXTX = 3,
+ SXTB = 4,
+ SXTH = 5,
+ SXTW = 6,
+ SXTX = 7
+};
+
+enum SystemHint {
+ NOP = 0,
+ YIELD = 1,
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5
+};
+
+// System/special register names.
+// This information is not encoded as one field but as the concatenation of
+// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
+enum SystemRegister {
+ NZCV = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x2 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset,
+ FPCR = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x4 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset
+};
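+
+// With the field offsets above, NZCV evaluates to 0x5A10 and FPCR to
+// 0x5A20; Assembler::ImmSystemRegister() shifts these values back into
+// bits 19:5 of an MRS or MSR instruction.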
+
+// Instruction enumerations.
+//
+// These are the masks that define a class of instructions, and the list of
+// instructions within each class. Each enumeration has a Fixed, FMask and
+// Mask value.
+//
+// Fixed: The fixed bits in this instruction class.
+// FMask: The mask used to extract the fixed bits in the class.
+// Mask: The mask used to identify the instructions within a class.
+//
+// The enumerations can be used like this:
+//
+// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// switch(instr->Mask(PCRelAddressingMask)) {
+// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
+// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
+// default: printf("Unknown instruction\n");
+// }
+
+
+// Generic fields.
+enum GenericInstrField {
+ SixtyFourBits = 0x80000000,
+ ThirtyTwoBits = 0x00000000,
+ FP32 = 0x00000000,
+ FP64 = 0x00400000
+};
+
+// PC relative addressing.
+enum PCRelAddressingOp {
+ PCRelAddressingFixed = 0x10000000,
+ PCRelAddressingFMask = 0x1F000000,
+ PCRelAddressingMask = 0x9F000000,
+ ADR = PCRelAddressingFixed | 0x00000000,
+ ADRP = PCRelAddressingFixed | 0x80000000
+};
+
+// Add/sub (immediate, shifted and extended).
+const int kSFOffset = 31;
+enum AddSubOp {
+ AddSubOpMask = 0x60000000,
+ AddSubSetFlagsBit = 0x20000000,
+ ADD = 0x00000000,
+ ADDS = ADD | AddSubSetFlagsBit,
+ SUB = 0x40000000,
+ SUBS = SUB | AddSubSetFlagsBit
+};
+
+#define ADD_SUB_OP_LIST(V) \
+ V(ADD), \
+ V(ADDS), \
+ V(SUB), \
+ V(SUBS)
+
+enum AddSubImmediateOp {
+ AddSubImmediateFixed = 0x11000000,
+ AddSubImmediateFMask = 0x1F000000,
+ AddSubImmediateMask = 0xFF000000,
+ #define ADD_SUB_IMMEDIATE(A) \
+ A##_w_imm = AddSubImmediateFixed | A, \
+ A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE)
+ #undef ADD_SUB_IMMEDIATE
+};
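+
+// For example, ADD_w_imm == 0x11000000 and SUBS_x_imm == 0xF1000000
+// (AddSubImmediateFixed | SUBS | SixtyFourBits).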
+
+enum AddSubShiftedOp {
+ AddSubShiftedFixed = 0x0B000000,
+ AddSubShiftedFMask = 0x1F200000,
+ AddSubShiftedMask = 0xFF200000,
+ #define ADD_SUB_SHIFTED(A) \
+ A##_w_shift = AddSubShiftedFixed | A, \
+ A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_SHIFTED)
+ #undef ADD_SUB_SHIFTED
+};
+
+enum AddSubExtendedOp {
+ AddSubExtendedFixed = 0x0B200000,
+ AddSubExtendedFMask = 0x1F200000,
+ AddSubExtendedMask = 0xFFE00000,
+ #define ADD_SUB_EXTENDED(A) \
+ A##_w_ext = AddSubExtendedFixed | A, \
+ A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_EXTENDED)
+ #undef ADD_SUB_EXTENDED
+};
+
+// Add/sub with carry.
+enum AddSubWithCarryOp {
+ AddSubWithCarryFixed = 0x1A000000,
+ AddSubWithCarryFMask = 0x1FE00000,
+ AddSubWithCarryMask = 0xFFE0FC00,
+ ADC_w = AddSubWithCarryFixed | ADD,
+ ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
+ ADC = ADC_w,
+ ADCS_w = AddSubWithCarryFixed | ADDS,
+ ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
+ SBC_w = AddSubWithCarryFixed | SUB,
+ SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
+ SBC = SBC_w,
+ SBCS_w = AddSubWithCarryFixed | SUBS,
+ SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
+};
+
+
+// Logical (immediate and shifted register).
+enum LogicalOp {
+ LogicalOpMask = 0x60200000,
+ NOT = 0x00200000,
+ AND = 0x00000000,
+ BIC = AND | NOT,
+ ORR = 0x20000000,
+ ORN = ORR | NOT,
+ EOR = 0x40000000,
+ EON = EOR | NOT,
+ ANDS = 0x60000000,
+ BICS = ANDS | NOT
+};
+
+// Logical immediate.
+enum LogicalImmediateOp {
+ LogicalImmediateFixed = 0x12000000,
+ LogicalImmediateFMask = 0x1F800000,
+ LogicalImmediateMask = 0xFF800000,
+ AND_w_imm = LogicalImmediateFixed | AND,
+ AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
+ ORR_w_imm = LogicalImmediateFixed | ORR,
+ ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
+ EOR_w_imm = LogicalImmediateFixed | EOR,
+ EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
+ ANDS_w_imm = LogicalImmediateFixed | ANDS,
+ ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
+};
+
+// Logical shifted register.
+enum LogicalShiftedOp {
+ LogicalShiftedFixed = 0x0A000000,
+ LogicalShiftedFMask = 0x1F000000,
+ LogicalShiftedMask = 0xFF200000,
+ AND_w = LogicalShiftedFixed | AND,
+ AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
+ AND_shift = AND_w,
+ BIC_w = LogicalShiftedFixed | BIC,
+ BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
+ BIC_shift = BIC_w,
+ ORR_w = LogicalShiftedFixed | ORR,
+ ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
+ ORR_shift = ORR_w,
+ ORN_w = LogicalShiftedFixed | ORN,
+ ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
+ ORN_shift = ORN_w,
+ EOR_w = LogicalShiftedFixed | EOR,
+ EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
+ EOR_shift = EOR_w,
+ EON_w = LogicalShiftedFixed | EON,
+ EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
+ EON_shift = EON_w,
+ ANDS_w = LogicalShiftedFixed | ANDS,
+ ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
+ ANDS_shift = ANDS_w,
+ BICS_w = LogicalShiftedFixed | BICS,
+ BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
+ BICS_shift = BICS_w
+};
+
+// Move wide immediate.
+enum MoveWideImmediateOp {
+ MoveWideImmediateFixed = 0x12800000,
+ MoveWideImmediateFMask = 0x1F800000,
+ MoveWideImmediateMask = 0xFF800000,
+ MOVN = 0x00000000,
+ MOVZ = 0x40000000,
+ MOVK = 0x60000000,
+ MOVN_w = MoveWideImmediateFixed | MOVN,
+ MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
+ MOVZ_w = MoveWideImmediateFixed | MOVZ,
+ MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
+ MOVK_w = MoveWideImmediateFixed | MOVK,
+ MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
+};
+
+// Bitfield.
+const int kBitfieldNOffset = 22; // Bitfield N field.
+enum BitfieldOp {
+ BitfieldFixed = 0x13000000,
+ BitfieldFMask = 0x1F800000,
+ BitfieldMask = 0xFF800000,
+ SBFM_w = BitfieldFixed | 0x00000000,
+ SBFM_x = BitfieldFixed | 0x80000000,
+ SBFM = SBFM_w,
+ BFM_w = BitfieldFixed | 0x20000000,
+ BFM_x = BitfieldFixed | 0xA0000000,
+ BFM = BFM_w,
+ UBFM_w = BitfieldFixed | 0x40000000,
+ UBFM_x = BitfieldFixed | 0xC0000000,
+ UBFM = UBFM_w
+};
+
+// Extract.
+enum ExtractOp {
+ ExtractFixed = 0x13800000,
+ ExtractFMask = 0x1F800000,
+ ExtractMask = 0xFFA00000,
+ EXTR_w = ExtractFixed | 0x00000000,
+ EXTR_x = ExtractFixed | 0x80000000,
+ EXTR = EXTR_w
+};
+
+// Unconditional branch.
+enum UnconditionalBranchOp {
+ UnconditionalBranchFixed = 0x14000000,
+ UnconditionalBranchFMask = 0x7C000000,
+ UnconditionalBranchMask = 0xFC000000,
+ B = UnconditionalBranchFixed | 0x00000000,
+ BL = UnconditionalBranchFixed | 0x80000000
+};
+
+// Unconditional branch to register.
+enum UnconditionalBranchToRegisterOp {
+ UnconditionalBranchToRegisterFixed = 0xD6000000,
+ UnconditionalBranchToRegisterFMask = 0xFE000000,
+ UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
+ BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
+ BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
+ RET = UnconditionalBranchToRegisterFixed | 0x005F0000
+};
+
+// Compare and branch.
+enum CompareBranchOp {
+ CompareBranchFixed = 0x34000000,
+ CompareBranchFMask = 0x7E000000,
+ CompareBranchMask = 0xFF000000,
+ CBZ_w = CompareBranchFixed | 0x00000000,
+ CBZ_x = CompareBranchFixed | 0x80000000,
+ CBZ = CBZ_w,
+ CBNZ_w = CompareBranchFixed | 0x01000000,
+ CBNZ_x = CompareBranchFixed | 0x81000000,
+ CBNZ = CBNZ_w
+};
+
+// Test and branch.
+enum TestBranchOp {
+ TestBranchFixed = 0x36000000,
+ TestBranchFMask = 0x7E000000,
+ TestBranchMask = 0x7F000000,
+ TBZ = TestBranchFixed | 0x00000000,
+ TBNZ = TestBranchFixed | 0x01000000
+};
+
+// Conditional branch.
+enum ConditionalBranchOp {
+ ConditionalBranchFixed = 0x54000000,
+ ConditionalBranchFMask = 0xFE000000,
+ ConditionalBranchMask = 0xFF000010,
+ B_cond = ConditionalBranchFixed | 0x00000000
+};
+
+// System.
+// System instruction encoding is complicated because some instructions use op
+// and CR fields to encode parameters. To handle this cleanly, the system
+// instructions are split into more than one enum.
+
+enum SystemOp {
+ SystemFixed = 0xD5000000,
+ SystemFMask = 0xFFC00000
+};
+
+enum SystemSysRegOp {
+ SystemSysRegFixed = 0xD5100000,
+ SystemSysRegFMask = 0xFFD00000,
+ SystemSysRegMask = 0xFFF00000,
+ MRS = SystemSysRegFixed | 0x00200000,
+ MSR = SystemSysRegFixed | 0x00000000
+};
+
+enum SystemHintOp {
+ SystemHintFixed = 0xD503201F,
+ SystemHintFMask = 0xFFFFF01F,
+ SystemHintMask = 0xFFFFF01F,
+ HINT = SystemHintFixed | 0x00000000
+};
+
+// Exception.
+enum ExceptionOp {
+ ExceptionFixed = 0xD4000000,
+ ExceptionFMask = 0xFF000000,
+ ExceptionMask = 0xFFE0001F,
+ HLT = ExceptionFixed | 0x00400000,
+ BRK = ExceptionFixed | 0x00200000,
+ SVC = ExceptionFixed | 0x00000001,
+ HVC = ExceptionFixed | 0x00000002,
+ SMC = ExceptionFixed | 0x00000003,
+ DCPS1 = ExceptionFixed | 0x00A00001,
+ DCPS2 = ExceptionFixed | 0x00A00002,
+ DCPS3 = ExceptionFixed | 0x00A00003
+};
+
+// Any load or store.
+enum LoadStoreAnyOp {
+ LoadStoreAnyFMask = 0x0a000000,
+ LoadStoreAnyFixed = 0x08000000
+};
+
+#define LOAD_STORE_PAIR_OP_LIST(V) \
+ V(STP, w, 0x00000000), \
+ V(LDP, w, 0x00400000), \
+ V(LDPSW, x, 0x40400000), \
+ V(STP, x, 0x80000000), \
+ V(LDP, x, 0x80400000), \
+ V(STP, s, 0x04000000), \
+ V(LDP, s, 0x04400000), \
+ V(STP, d, 0x44000000), \
+ V(LDP, d, 0x44400000)
+
+// Load/store pair (post, pre and offset).
+enum LoadStorePairOp {
+ LoadStorePairMask = 0xC4400000,
+ LoadStorePairLBit = 1 << 22,
+ #define LOAD_STORE_PAIR(A, B, C) \
+ A##_##B = C
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR)
+ #undef LOAD_STORE_PAIR
+};
+
+enum LoadStorePairPostIndexOp {
+ LoadStorePairPostIndexFixed = 0x28800000,
+ LoadStorePairPostIndexFMask = 0x3B800000,
+ LoadStorePairPostIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
+ A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX)
+ #undef LOAD_STORE_PAIR_POST_INDEX
+};
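+
+// For example, LDP_x_post == 0x28800000 | 0x80400000 == 0xA8C00000, the
+// encoding of ldp xt, xt2, [xn], #imm.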
+
+enum LoadStorePairPreIndexOp {
+ LoadStorePairPreIndexFixed = 0x29800000,
+ LoadStorePairPreIndexFMask = 0x3B800000,
+ LoadStorePairPreIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
+ A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX)
+ #undef LOAD_STORE_PAIR_PRE_INDEX
+};
+
+enum LoadStorePairOffsetOp {
+ LoadStorePairOffsetFixed = 0x29000000,
+ LoadStorePairOffsetFMask = 0x3B800000,
+ LoadStorePairOffsetMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_OFFSET(A, B, C) \
+ A##_##B##_off = LoadStorePairOffsetFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET)
+ #undef LOAD_STORE_PAIR_OFFSET
+};
+
+enum LoadStorePairNonTemporalOp {
+ LoadStorePairNonTemporalFixed = 0x28000000,
+ LoadStorePairNonTemporalFMask = 0x3B800000,
+ LoadStorePairNonTemporalMask = 0xFFC00000,
+ STNP_w = LoadStorePairNonTemporalFixed | STP_w,
+ LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
+ STNP_x = LoadStorePairNonTemporalFixed | STP_x,
+ LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
+ STNP_s = LoadStorePairNonTemporalFixed | STP_s,
+ LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
+ STNP_d = LoadStorePairNonTemporalFixed | STP_d,
+ LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
+};
+
+// Load literal.
+enum LoadLiteralOp {
+ LoadLiteralFixed = 0x18000000,
+ LoadLiteralFMask = 0x3B000000,
+ LoadLiteralMask = 0xFF000000,
+ LDR_w_lit = LoadLiteralFixed | 0x00000000,
+ LDR_x_lit = LoadLiteralFixed | 0x40000000,
+ LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
+ PRFM_lit = LoadLiteralFixed | 0xC0000000,
+ LDR_s_lit = LoadLiteralFixed | 0x04000000,
+ LDR_d_lit = LoadLiteralFixed | 0x44000000
+};
+
+#define LOAD_STORE_OP_LIST(V) \
+ V(ST, RB, w, 0x00000000), \
+ V(ST, RH, w, 0x40000000), \
+ V(ST, R, w, 0x80000000), \
+ V(ST, R, x, 0xC0000000), \
+ V(LD, RB, w, 0x00400000), \
+ V(LD, RH, w, 0x40400000), \
+ V(LD, R, w, 0x80400000), \
+ V(LD, R, x, 0xC0400000), \
+ V(LD, RSB, x, 0x00800000), \
+ V(LD, RSH, x, 0x40800000), \
+ V(LD, RSW, x, 0x80800000), \
+ V(LD, RSB, w, 0x00C00000), \
+ V(LD, RSH, w, 0x40C00000), \
+ V(ST, R, s, 0x84000000), \
+ V(ST, R, d, 0xC4000000), \
+ V(LD, R, s, 0x84400000), \
+ V(LD, R, d, 0xC4400000)
+
+
+// Load/store unscaled offset.
+enum LoadStoreUnscaledOffsetOp {
+ LoadStoreUnscaledOffsetFixed = 0x38000000,
+ LoadStoreUnscaledOffsetFMask = 0x3B200C00,
+ LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+ #define LOAD_STORE_UNSCALED(A, B, C, D) \
+ A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
+ #undef LOAD_STORE_UNSCALED
+};
+
+// Load/store (post, pre, offset and unsigned).
+enum LoadStoreOp {
+ LoadStoreOpMask = 0xC4C00000,
+ #define LOAD_STORE(A, B, C, D) \
+ A##B##_##C = D
+ LOAD_STORE_OP_LIST(LOAD_STORE),
+ #undef LOAD_STORE
+ PRFM = 0xC0800000
+};
+
+// Load/store post index.
+enum LoadStorePostIndex {
+ LoadStorePostIndexFixed = 0x38000400,
+ LoadStorePostIndexFMask = 0x3B200C00,
+ LoadStorePostIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_POST_INDEX(A, B, C, D) \
+ A##B##_##C##_post = LoadStorePostIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX)
+ #undef LOAD_STORE_POST_INDEX
+};
+
+// Load/store pre index.
+enum LoadStorePreIndex {
+ LoadStorePreIndexFixed = 0x38000C00,
+ LoadStorePreIndexFMask = 0x3B200C00,
+ LoadStorePreIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_PRE_INDEX(A, B, C, D) \
+ A##B##_##C##_pre = LoadStorePreIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX)
+ #undef LOAD_STORE_PRE_INDEX
+};
+
+// Load/store unsigned offset.
+enum LoadStoreUnsignedOffset {
+ LoadStoreUnsignedOffsetFixed = 0x39000000,
+ LoadStoreUnsignedOffsetFMask = 0x3B000000,
+ LoadStoreUnsignedOffsetMask = 0xFFC00000,
+ PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
+ A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET)
+ #undef LOAD_STORE_UNSIGNED_OFFSET
+};
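+
+// For example, LDR_x_unsigned == 0x39000000 | 0xC0400000 == 0xF9400000,
+// the encoding of ldr xt, [xn, #pimm].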
+
+// Load/store register offset.
+enum LoadStoreRegisterOffset {
+ LoadStoreRegisterOffsetFixed = 0x38200800,
+ LoadStoreRegisterOffsetFMask = 0x3B200C00,
+ LoadStoreRegisterOffsetMask = 0xFFE00C00,
+ PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
+ #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
+ A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET)
+ #undef LOAD_STORE_REGISTER_OFFSET
+};
+
+// Conditional compare.
+enum ConditionalCompareOp {
+ ConditionalCompareMask = 0x60000000,
+ CCMN = 0x20000000,
+ CCMP = 0x60000000
+};
+
+// Conditional compare register.
+enum ConditionalCompareRegisterOp {
+ ConditionalCompareRegisterFixed = 0x1A400000,
+ ConditionalCompareRegisterFMask = 0x1FE00800,
+ ConditionalCompareRegisterMask = 0xFFE00C10,
+ CCMN_w = ConditionalCompareRegisterFixed | CCMN,
+ CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN,
+ CCMP_w = ConditionalCompareRegisterFixed | CCMP,
+ CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP
+};
+
+// Conditional compare immediate.
+enum ConditionalCompareImmediateOp {
+ ConditionalCompareImmediateFixed = 0x1A400800,
+ ConditionalCompareImmediateFMask = 0x1FE00800,
+ ConditionalCompareImmediateMask = 0xFFE00C10,
+ CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN,
+ CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN,
+ CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP,
+ CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP
+};
+
+// Conditional select.
+enum ConditionalSelectOp {
+ ConditionalSelectFixed = 0x1A800000,
+ ConditionalSelectFMask = 0x1FE00000,
+ ConditionalSelectMask = 0xFFE00C00,
+ CSEL_w = ConditionalSelectFixed | 0x00000000,
+ CSEL_x = ConditionalSelectFixed | 0x80000000,
+ CSEL = CSEL_w,
+ CSINC_w = ConditionalSelectFixed | 0x00000400,
+ CSINC_x = ConditionalSelectFixed | 0x80000400,
+ CSINC = CSINC_w,
+ CSINV_w = ConditionalSelectFixed | 0x40000000,
+ CSINV_x = ConditionalSelectFixed | 0xC0000000,
+ CSINV = CSINV_w,
+ CSNEG_w = ConditionalSelectFixed | 0x40000400,
+ CSNEG_x = ConditionalSelectFixed | 0xC0000400,
+ CSNEG = CSNEG_w
+};
+
+// Data processing 1 source.
+enum DataProcessing1SourceOp {
+ DataProcessing1SourceFixed = 0x5AC00000,
+ DataProcessing1SourceFMask = 0x5FE00000,
+ DataProcessing1SourceMask = 0xFFFFFC00,
+ RBIT = DataProcessing1SourceFixed | 0x00000000,
+ RBIT_w = RBIT,
+ RBIT_x = RBIT | SixtyFourBits,
+ REV16 = DataProcessing1SourceFixed | 0x00000400,
+ REV16_w = REV16,
+ REV16_x = REV16 | SixtyFourBits,
+ REV = DataProcessing1SourceFixed | 0x00000800,
+ REV_w = REV,
+ REV32_x = REV | SixtyFourBits,
+ REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
+ CLZ = DataProcessing1SourceFixed | 0x00001000,
+ CLZ_w = CLZ,
+ CLZ_x = CLZ | SixtyFourBits,
+ CLS = DataProcessing1SourceFixed | 0x00001400,
+ CLS_w = CLS,
+ CLS_x = CLS | SixtyFourBits
+};
+
+// Data processing 2 source.
+enum DataProcessing2SourceOp {
+ DataProcessing2SourceFixed = 0x1AC00000,
+ DataProcessing2SourceFMask = 0x5FE00000,
+ DataProcessing2SourceMask = 0xFFE0FC00,
+ UDIV_w = DataProcessing2SourceFixed | 0x00000800,
+ UDIV_x = DataProcessing2SourceFixed | 0x80000800,
+ UDIV = UDIV_w,
+ SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
+ SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
+ SDIV = SDIV_w,
+ LSLV_w = DataProcessing2SourceFixed | 0x00002000,
+ LSLV_x = DataProcessing2SourceFixed | 0x80002000,
+ LSLV = LSLV_w,
+ LSRV_w = DataProcessing2SourceFixed | 0x00002400,
+ LSRV_x = DataProcessing2SourceFixed | 0x80002400,
+ LSRV = LSRV_w,
+ ASRV_w = DataProcessing2SourceFixed | 0x00002800,
+ ASRV_x = DataProcessing2SourceFixed | 0x80002800,
+ ASRV = ASRV_w,
+ RORV_w = DataProcessing2SourceFixed | 0x00002C00,
+ RORV_x = DataProcessing2SourceFixed | 0x80002C00,
+ RORV = RORV_w,
+ CRC32B = DataProcessing2SourceFixed | 0x00004000,
+ CRC32H = DataProcessing2SourceFixed | 0x00004400,
+ CRC32W = DataProcessing2SourceFixed | 0x00004800,
+ CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
+ CRC32CB = DataProcessing2SourceFixed | 0x00005000,
+ CRC32CH = DataProcessing2SourceFixed | 0x00005400,
+ CRC32CW = DataProcessing2SourceFixed | 0x00005800,
+ CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00
+};
+
+// Data processing 3 source.
+enum DataProcessing3SourceOp {
+ DataProcessing3SourceFixed = 0x1B000000,
+ DataProcessing3SourceFMask = 0x1F000000,
+ DataProcessing3SourceMask = 0xFFE08000,
+ MADD_w = DataProcessing3SourceFixed | 0x00000000,
+ MADD_x = DataProcessing3SourceFixed | 0x80000000,
+ MADD = MADD_w,
+ MSUB_w = DataProcessing3SourceFixed | 0x00008000,
+ MSUB_x = DataProcessing3SourceFixed | 0x80008000,
+ MSUB = MSUB_w,
+ SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
+ SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
+ SMULH_x = DataProcessing3SourceFixed | 0x80400000,
+ UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
+ UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
+ UMULH_x = DataProcessing3SourceFixed | 0x80C00000
+};
+
+// Floating point compare.
+enum FPCompareOp {
+ FPCompareFixed = 0x1E202000,
+ FPCompareFMask = 0x5F203C00,
+ FPCompareMask = 0xFFE0FC1F,
+ FCMP_s = FPCompareFixed | 0x00000000,
+ FCMP_d = FPCompareFixed | FP64 | 0x00000000,
+ FCMP = FCMP_s,
+ FCMP_s_zero = FPCompareFixed | 0x00000008,
+ FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
+ FCMP_zero = FCMP_s_zero,
+ FCMPE_s = FPCompareFixed | 0x00000010,
+ FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
+ FCMPE_s_zero = FPCompareFixed | 0x00000018,
+ FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018
+};
+
+// Floating point conditional compare.
+enum FPConditionalCompareOp {
+ FPConditionalCompareFixed = 0x1E200400,
+ FPConditionalCompareFMask = 0x5F200C00,
+ FPConditionalCompareMask = 0xFFE00C10,
+ FCCMP_s = FPConditionalCompareFixed | 0x00000000,
+ FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
+ FCCMP = FCCMP_s,
+ FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
+ FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
+ FCCMPE = FCCMPE_s
+};
+
+// Floating point conditional select.
+enum FPConditionalSelectOp {
+ FPConditionalSelectFixed = 0x1E200C00,
+ FPConditionalSelectFMask = 0x5F200C00,
+ FPConditionalSelectMask = 0xFFE00C00,
+ FCSEL_s = FPConditionalSelectFixed | 0x00000000,
+ FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
+ FCSEL = FCSEL_s
+};
+
+// Floating point immediate.
+enum FPImmediateOp {
+ FPImmediateFixed = 0x1E201000,
+ FPImmediateFMask = 0x5F201C00,
+ FPImmediateMask = 0xFFE01C00,
+ FMOV_s_imm = FPImmediateFixed | 0x00000000,
+ FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
+};
+
+// Floating point data processing 1 source.
+enum FPDataProcessing1SourceOp {
+ FPDataProcessing1SourceFixed = 0x1E204000,
+ FPDataProcessing1SourceFMask = 0x5F207C00,
+ FPDataProcessing1SourceMask = 0xFFFFFC00,
+ FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
+ FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
+ FMOV = FMOV_s,
+ FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
+ FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
+ FABS = FABS_s,
+ FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
+ FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
+ FNEG = FNEG_s,
+ FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
+ FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
+ FSQRT = FSQRT_s,
+ FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
+ FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+ FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
+ FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
+ FRINTN = FRINTN_s,
+ FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
+ FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
+ FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
+ FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
+ FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
+ FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
+ FRINTZ = FRINTZ_s,
+ FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
+ FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
+ FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
+ FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
+ FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
+ FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000
+};
+
+// Floating point data processing 2 source.
+enum FPDataProcessing2SourceOp {
+ FPDataProcessing2SourceFixed = 0x1E200800,
+ FPDataProcessing2SourceFMask = 0x5F200C00,
+ FPDataProcessing2SourceMask = 0xFFE0FC00,
+ FMUL = FPDataProcessing2SourceFixed | 0x00000000,
+ FMUL_s = FMUL,
+ FMUL_d = FMUL | FP64,
+ FDIV = FPDataProcessing2SourceFixed | 0x00001000,
+ FDIV_s = FDIV,
+ FDIV_d = FDIV | FP64,
+ FADD = FPDataProcessing2SourceFixed | 0x00002000,
+ FADD_s = FADD,
+ FADD_d = FADD | FP64,
+ FSUB = FPDataProcessing2SourceFixed | 0x00003000,
+ FSUB_s = FSUB,
+ FSUB_d = FSUB | FP64,
+ FMAX = FPDataProcessing2SourceFixed | 0x00004000,
+ FMAX_s = FMAX,
+ FMAX_d = FMAX | FP64,
+ FMIN = FPDataProcessing2SourceFixed | 0x00005000,
+ FMIN_s = FMIN,
+ FMIN_d = FMIN | FP64,
+ FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
+ FMAXNM_s = FMAXNM,
+ FMAXNM_d = FMAXNM | FP64,
+ FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
+ FMINNM_s = FMINNM,
+ FMINNM_d = FMINNM | FP64,
+ FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
+ FNMUL_s = FNMUL,
+ FNMUL_d = FNMUL | FP64
+};
+
+// Floating point data processing 3 source.
+enum FPDataProcessing3SourceOp {
+ FPDataProcessing3SourceFixed = 0x1F000000,
+ FPDataProcessing3SourceFMask = 0x5F000000,
+ FPDataProcessing3SourceMask = 0xFFE08000,
+ FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
+ FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
+ FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
+ FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
+ FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
+ FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
+ FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
+ FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
+};
+
+// Conversion between floating point and integer.
+enum FPIntegerConvertOp {
+ FPIntegerConvertFixed = 0x1E200000,
+ FPIntegerConvertFMask = 0x5F20FC00,
+ FPIntegerConvertMask = 0xFFFFFC00,
+ FCVTNS = FPIntegerConvertFixed | 0x00000000,
+ FCVTNS_ws = FCVTNS,
+ FCVTNS_xs = FCVTNS | SixtyFourBits,
+ FCVTNS_wd = FCVTNS | FP64,
+ FCVTNS_xd = FCVTNS | SixtyFourBits | FP64,
+ FCVTNU = FPIntegerConvertFixed | 0x00010000,
+ FCVTNU_ws = FCVTNU,
+ FCVTNU_xs = FCVTNU | SixtyFourBits,
+ FCVTNU_wd = FCVTNU | FP64,
+ FCVTNU_xd = FCVTNU | SixtyFourBits | FP64,
+ FCVTPS = FPIntegerConvertFixed | 0x00080000,
+ FCVTPS_ws = FCVTPS,
+ FCVTPS_xs = FCVTPS | SixtyFourBits,
+ FCVTPS_wd = FCVTPS | FP64,
+ FCVTPS_xd = FCVTPS | SixtyFourBits | FP64,
+ FCVTPU = FPIntegerConvertFixed | 0x00090000,
+ FCVTPU_ws = FCVTPU,
+ FCVTPU_xs = FCVTPU | SixtyFourBits,
+ FCVTPU_wd = FCVTPU | FP64,
+ FCVTPU_xd = FCVTPU | SixtyFourBits | FP64,
+ FCVTMS = FPIntegerConvertFixed | 0x00100000,
+ FCVTMS_ws = FCVTMS,
+ FCVTMS_xs = FCVTMS | SixtyFourBits,
+ FCVTMS_wd = FCVTMS | FP64,
+ FCVTMS_xd = FCVTMS | SixtyFourBits | FP64,
+ FCVTMU = FPIntegerConvertFixed | 0x00110000,
+ FCVTMU_ws = FCVTMU,
+ FCVTMU_xs = FCVTMU | SixtyFourBits,
+ FCVTMU_wd = FCVTMU | FP64,
+ FCVTMU_xd = FCVTMU | SixtyFourBits | FP64,
+ FCVTZS = FPIntegerConvertFixed | 0x00180000,
+ FCVTZS_ws = FCVTZS,
+ FCVTZS_xs = FCVTZS | SixtyFourBits,
+ FCVTZS_wd = FCVTZS | FP64,
+ FCVTZS_xd = FCVTZS | SixtyFourBits | FP64,
+ FCVTZU = FPIntegerConvertFixed | 0x00190000,
+ FCVTZU_ws = FCVTZU,
+ FCVTZU_xs = FCVTZU | SixtyFourBits,
+ FCVTZU_wd = FCVTZU | FP64,
+ FCVTZU_xd = FCVTZU | SixtyFourBits | FP64,
+ SCVTF = FPIntegerConvertFixed | 0x00020000,
+ SCVTF_sw = SCVTF,
+ SCVTF_sx = SCVTF | SixtyFourBits,
+ SCVTF_dw = SCVTF | FP64,
+ SCVTF_dx = SCVTF | SixtyFourBits | FP64,
+ UCVTF = FPIntegerConvertFixed | 0x00030000,
+ UCVTF_sw = UCVTF,
+ UCVTF_sx = UCVTF | SixtyFourBits,
+ UCVTF_dw = UCVTF | FP64,
+ UCVTF_dx = UCVTF | SixtyFourBits | FP64,
+ FCVTAS = FPIntegerConvertFixed | 0x00040000,
+ FCVTAS_ws = FCVTAS,
+ FCVTAS_xs = FCVTAS | SixtyFourBits,
+ FCVTAS_wd = FCVTAS | FP64,
+ FCVTAS_xd = FCVTAS | SixtyFourBits | FP64,
+ FCVTAU = FPIntegerConvertFixed | 0x00050000,
+ FCVTAU_ws = FCVTAU,
+ FCVTAU_xs = FCVTAU | SixtyFourBits,
+ FCVTAU_wd = FCVTAU | FP64,
+ FCVTAU_xd = FCVTAU | SixtyFourBits | FP64,
+ FMOV_ws = FPIntegerConvertFixed | 0x00060000,
+ FMOV_sw = FPIntegerConvertFixed | 0x00070000,
+ FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
+ FMOV_dx = FMOV_sw | SixtyFourBits | FP64
+};
+
+// Conversion between fixed point and floating point.
+enum FPFixedPointConvertOp {
+ FPFixedPointConvertFixed = 0x1E000000,
+ FPFixedPointConvertFMask = 0x5F200000,
+ FPFixedPointConvertMask = 0xFFFF0000,
+ FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
+ FCVTZS_ws_fixed = FCVTZS_fixed,
+ FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits,
+ FCVTZS_wd_fixed = FCVTZS_fixed | FP64,
+ FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64,
+ FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
+ FCVTZU_ws_fixed = FCVTZU_fixed,
+ FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits,
+ FCVTZU_wd_fixed = FCVTZU_fixed | FP64,
+ FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64,
+ SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
+ SCVTF_sw_fixed = SCVTF_fixed,
+ SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
+ SCVTF_dw_fixed = SCVTF_fixed | FP64,
+ SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
+ UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
+ UCVTF_sw_fixed = UCVTF_fixed,
+ UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
+ UCVTF_dw_fixed = UCVTF_fixed | FP64,
+ UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
+};
+
+// Unimplemented and unallocated instructions. These are defined to make fixed
+// bit assertion easier.
+enum UnimplementedOp {
+ UnimplementedFixed = 0x00000000,
+ UnimplementedFMask = 0x00000000
+};
+
+enum UnallocatedOp {
+ UnallocatedFixed = 0x00000000,
+ UnallocatedFMask = 0x00000000
+};
+
+} // namespace vixl
+
+#endif // VIXL_A64_CONSTANTS_A64_H_
diff --git a/disas/libvixl/a64/cpu-a64.h b/disas/libvixl/a64/cpu-a64.h
new file mode 100644
index 0000000000..dfd8f015cf
--- /dev/null
+++ b/disas/libvixl/a64/cpu-a64.h
@@ -0,0 +1,56 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_CPU_A64_H
+#define VIXL_CPU_A64_H
+
+#include "globals.h"
+
+namespace vixl {
+
+class CPU {
+ public:
+ // Initialise CPU support.
+ static void SetUp();
+
+ // Ensures the data at a given address and with a given size is the same for
+ // the I and D caches. I and D caches are not automatically coherent on ARM
+ // so this operation is required before any dynamically generated code can
+ // safely run.
+ static void EnsureIAndDCacheCoherency(void *address, size_t length);
+
+ private:
+ // Return the content of the cache type register.
+ static uint32_t GetCacheType();
+
+ // I and D cache line size in bytes.
+ static unsigned icache_line_size_;
+ static unsigned dcache_line_size_;
+};
+
+} // namespace vixl
+
+#endif // VIXL_CPU_A64_H
diff --git a/disas/libvixl/a64/decoder-a64.cc b/disas/libvixl/a64/decoder-a64.cc
new file mode 100644
index 0000000000..9e9033c49c
--- /dev/null
+++ b/disas/libvixl/a64/decoder-a64.cc
@@ -0,0 +1,712 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "globals.h"
+#include "utils.h"
+#include "a64/decoder-a64.h"
+
+namespace vixl {
+// Top-level instruction decode function.
+void Decoder::Decode(Instruction* instr) {
+ if (instr->Bits(28, 27) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(27, 24)) {
+ // 0: PC relative addressing.
+ case 0x0: DecodePCRelAddressing(instr); break;
+
+ // 1: Add/sub immediate.
+ case 0x1: DecodeAddSubImmediate(instr); break;
+
+ // A: Logical shifted register.
+ // Add/sub with carry.
+ // Conditional compare register.
+ // Conditional compare immediate.
+ // Conditional select.
+ // Data processing 1 source.
+ // Data processing 2 source.
+ // B: Add/sub shifted register.
+ // Add/sub extended register.
+ // Data processing 3 source.
+ case 0xA:
+ case 0xB: DecodeDataProcessing(instr); break;
+
+ // 2: Logical immediate.
+ // Move wide immediate.
+ case 0x2: DecodeLogical(instr); break;
+
+ // 3: Bitfield.
+ // Extract.
+ case 0x3: DecodeBitfieldExtract(instr); break;
+
+ // 4: Unconditional branch immediate.
+ // Exception generation.
+ // Compare and branch immediate.
+ // 5: Compare and branch immediate.
+ // Conditional branch.
+ // System.
+ // 6,7: Unconditional branch.
+ // Test and branch immediate.
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7: DecodeBranchSystemException(instr); break;
+
+ // 8,9: Load/store register pair post-index.
+ // Load register literal.
+ // Load/store register unscaled immediate.
+ // Load/store register immediate post-index.
+ // Load/store register immediate pre-index.
+ // Load/store register offset.
+ // Load/store exclusive.
+ // C,D: Load/store register pair offset.
+ // Load/store register pair pre-index.
+ // Load/store register unsigned immediate.
+ // Advanced SIMD.
+ case 0x8:
+ case 0x9:
+ case 0xC:
+ case 0xD: DecodeLoadStore(instr); break;
+
+ // E: FP fixed point conversion.
+ // FP integer conversion.
+ // FP data processing 1 source.
+ // FP compare.
+ // FP immediate.
+ // FP data processing 2 source.
+ // FP conditional compare.
+ // FP conditional select.
+ // Advanced SIMD.
+ // F: FP data processing 3 source.
+ // Advanced SIMD.
+ case 0xE:
+ case 0xF: DecodeFP(instr); break;
+ }
+ }
+}
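+
+// As a worked example, decoding 0x91001C20 (add x0, x1, #7): Bits(28, 27)
+// is non-zero and Bits(27, 24) is 0x1, so the instruction is dispatched to
+// DecodeAddSubImmediate.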
+
+void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
+ visitors_.remove(new_visitor);
+ visitors_.push_back(new_visitor);
+}
+
+
+void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
+ visitors_.remove(new_visitor);
+ visitors_.push_front(new_visitor);
+}
+
+
+void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list. The last element must be
+ // registered_visitor.
+ ASSERT(*it == registered_visitor);
+ visitors_.insert(it, new_visitor);
+}
+
+
+void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ it++;
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list. The last element must be
+ // registered_visitor.
+ ASSERT(*it == registered_visitor);
+ visitors_.push_back(new_visitor);
+}
+
+
+void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
+ visitors_.remove(visitor);
+}
+
+
+void Decoder::DecodePCRelAddressing(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x0);
+ // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
+ // decode.
+ ASSERT(instr->Bit(28) == 0x1);
+ VisitPCRelAddressing(instr);
+}
+
+
+void Decoder::DecodeBranchSystemException(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x4) ||
+ (instr->Bits(27, 24) == 0x5) ||
+ (instr->Bits(27, 24) == 0x6) ||
+ (instr->Bits(27, 24) == 0x7) );
+
+ switch (instr->Bits(31, 29)) {
+ case 0:
+ case 4: {
+ VisitUnconditionalBranch(instr);
+ break;
+ }
+ case 1:
+ case 5: {
+ if (instr->Bit(25) == 0) {
+ VisitCompareBranch(instr);
+ } else {
+ VisitTestBranch(instr);
+ }
+ break;
+ }
+ case 2: {
+ if (instr->Bit(25) == 0) {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Mask(0x01000010) == 0x00000010)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalBranch(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(25) == 0) {
+ if (instr->Bit(24) == 0) {
+ if ((instr->Bits(4, 2) != 0) ||
+ (instr->Mask(0x00E0001D) == 0x00200001) ||
+ (instr->Mask(0x00E0001D) == 0x00400001) ||
+ (instr->Mask(0x00E0001E) == 0x00200002) ||
+ (instr->Mask(0x00E0001E) == 0x00400002) ||
+ (instr->Mask(0x00E0001C) == 0x00600000) ||
+ (instr->Mask(0x00E0001C) == 0x00800000) ||
+ (instr->Mask(0x00E0001F) == 0x00A00000) ||
+ (instr->Mask(0x00C0001C) == 0x00C00000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitException(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
+ if ((instr->Bits(21, 19) == 0x4) ||
+ (masked_003FF0E0 == 0x00033000) ||
+ (masked_003FF0E0 == 0x003FF020) ||
+ (masked_003FF0E0 == 0x003FF060) ||
+ (masked_003FF0E0 == 0x003FF0E0) ||
+ (instr->Mask(0x00388000) == 0x00008000) ||
+ (instr->Mask(0x0038E000) == 0x00000000) ||
+ (instr->Mask(0x0039E000) == 0x00002000) ||
+ (instr->Mask(0x003AE000) == 0x00002000) ||
+ (instr->Mask(0x003CE000) == 0x00042000) ||
+ (instr->Mask(0x003FFFC0) == 0x000320C0) ||
+ (instr->Mask(0x003FF100) == 0x00032100) ||
+ (instr->Mask(0x003FF200) == 0x00032200) ||
+ (instr->Mask(0x003FF400) == 0x00032400) ||
+ (instr->Mask(0x003FF800) == 0x00032800) ||
+ (instr->Mask(0x0038F000) == 0x00005000) ||
+ (instr->Mask(0x0038E000) == 0x00006000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitSystem(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Bits(20, 16) != 0x1F) ||
+ (instr->Bits(15, 10) != 0) ||
+ (instr->Bits(4, 0) != 0) ||
+ (instr->Bits(24, 21) == 0x3) ||
+ (instr->Bits(24, 22) == 0x3)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitUnconditionalBranchToRegister(instr);
+ }
+ }
+ break;
+ }
+ case 3:
+ case 7: {
+ VisitUnallocated(instr);
+ break;
+ }
+ }
+}
+
+
+void Decoder::DecodeLoadStore(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x8) ||
+ (instr->Bits(27, 24) == 0x9) ||
+ (instr->Bits(27, 24) == 0xC) ||
+ (instr->Bits(27, 24) == 0xD) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(26) == 0) {
+ // TODO: VisitLoadStoreExclusive.
+ VisitUnimplemented(instr);
+ } else {
+ DecodeAdvSIMDLoadStore(instr);
+ }
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Mask(0xC4400000) == 0xC0400000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePairNonTemporal(instr);
+ }
+ } else {
+ VisitLoadStorePairPostIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Mask(0xC4000000) == 0xC4000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadLiteral(instr);
+ }
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(21) == 0) {
+ switch (instr->Bits(11, 10)) {
+ case 0: {
+ VisitLoadStoreUnscaledOffset(instr);
+ break;
+ }
+ case 1: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePostIndex(instr);
+ }
+ break;
+ }
+ case 2: {
+ // TODO: VisitLoadStoreRegisterOffsetUnpriv.
+ VisitUnimplemented(instr);
+ break;
+ }
+ case 3: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePreIndex(instr);
+ }
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(11, 10) == 0x2) {
+ if (instr->Bit(14) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreRegisterOffset(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLoadStorePairOffset(instr);
+ } else {
+ VisitLoadStorePairPreIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreUnsignedOffset(instr);
+ }
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeLogical(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x2);
+
+ if (instr->Mask(0x80400000) == 0x00400000) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLogicalImmediate(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitMoveWideImmediate(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeBitfieldExtract(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x3);
+
+ if ((instr->Mask(0x80400000) == 0x80000000) ||
+ (instr->Mask(0x80400000) == 0x00400000) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else if (instr->Bit(23) == 0) {
+ if ((instr->Mask(0x80200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) == 0x60000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitBitfield(instr);
+ }
+ } else {
+ if ((instr->Mask(0x60200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitExtract(instr);
+ }
+ }
+}
+
+
+void Decoder::DecodeAddSubImmediate(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x1);
+ if (instr->Bit(23) == 1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubImmediate(instr);
+ }
+}
+
+
+void Decoder::DecodeDataProcessing(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0xA) ||
+ (instr->Bits(27, 24) == 0xB) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Mask(0x80008000) == 0x00008000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLogicalShifted(instr);
+ }
+ } else {
+ switch (instr->Bits(23, 21)) {
+ case 0: {
+ if (instr->Mask(0x0000FC00) != 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubWithCarry(instr);
+ }
+ break;
+ }
+ case 2: {
+ if ((instr->Bit(29) == 0) ||
+ (instr->Mask(0x00000410) != 0)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(11) == 0) {
+ VisitConditionalCompareRegister(instr);
+ } else {
+ VisitConditionalCompareImmediate(instr);
+ }
+ }
+ break;
+ }
+ case 4: {
+ if (instr->Mask(0x20000800) != 0x00000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalSelect(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(29) == 0x1) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(30) == 0) {
+ if ((instr->Bit(15) == 0x1) ||
+ (instr->Bits(15, 11) == 0) ||
+ (instr->Bits(15, 12) == 0x1) ||
+ (instr->Bits(15, 12) == 0x3) ||
+ (instr->Bits(15, 13) == 0x3) ||
+ (instr->Mask(0x8000EC00) == 0x00004C00) ||
+ (instr->Mask(0x8000E800) == 0x80004000) ||
+ (instr->Mask(0x8000E400) == 0x80004000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing2Source(instr);
+ }
+ } else {
+ if ((instr->Bit(13) == 1) ||
+ (instr->Bits(20, 16) != 0) ||
+ (instr->Bits(15, 14) != 0) ||
+ (instr->Mask(0xA01FFC00) == 0x00000C00) ||
+ (instr->Mask(0x201FF800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing1Source(instr);
+ }
+ }
+          }
+          break;
+        }
+ case 1:
+ case 3:
+ case 5:
+ case 7: VisitUnallocated(instr); break;
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubShifted(instr);
+ }
+ } else {
+ if ((instr->Mask(0x00C00000) != 0x00000000) ||
+ (instr->Mask(0x00001400) == 0x00001400) ||
+ (instr->Mask(0x00001800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubExtended(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(30) == 0x1) ||
+ (instr->Bits(30, 29) == 0x1) ||
+ (instr->Mask(0xE0600000) == 0x00200000) ||
+ (instr->Mask(0xE0608000) == 0x00400000) ||
+ (instr->Mask(0x60608000) == 0x00408000) ||
+ (instr->Mask(0x60E00000) == 0x00E00000) ||
+ (instr->Mask(0x60E00000) == 0x00800000) ||
+ (instr->Mask(0x60E00000) == 0x00600000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing3Source(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeFP(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0xE) ||
+ (instr->Bits(27, 24) == 0xF) );
+
+ if (instr->Bit(28) == 0) {
+ DecodeAdvSIMDDataProcessing(instr);
+ } else {
+ if (instr->Bit(29) == 1) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bits(31, 30) == 0x3) {
+ VisitUnallocated(instr);
+ } else if (instr->Bits(31, 30) == 0x1) {
+ DecodeAdvSIMDDataProcessing(instr);
+ } else {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bit(23) == 1) ||
+ (instr->Bit(18) == 1) ||
+ (instr->Mask(0x80008000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x00160000) == 0x00000000) ||
+ (instr->Mask(0x00160000) == 0x00120000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPFixedPointConvert(instr);
+ }
+ } else {
+ if (instr->Bits(15, 10) == 32) {
+ VisitUnallocated(instr);
+ } else if (instr->Bits(15, 10) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x000E0000) == 0x000C0000) ||
+ (instr->Mask(0x00160000) == 0x00120000) ||
+ (instr->Mask(0x00160000) == 0x00140000) ||
+ (instr->Mask(0x20C40000) == 0x00800000) ||
+ (instr->Mask(0x20C60000) == 0x00840000) ||
+ (instr->Mask(0xA0C60000) == 0x80060000) ||
+ (instr->Mask(0xA0C60000) == 0x00860000) ||
+ (instr->Mask(0xA0C60000) == 0x00460000) ||
+ (instr->Mask(0xA0CE0000) == 0x80860000) ||
+ (instr->Mask(0xA0CE0000) == 0x804E0000) ||
+ (instr->Mask(0xA0CE0000) == 0x000E0000) ||
+ (instr->Mask(0xA0D60000) == 0x00160000) ||
+ (instr->Mask(0xA0D60000) == 0x80560000) ||
+ (instr->Mask(0xA0D60000) == 0x80960000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPIntegerConvert(instr);
+ }
+ } else if (instr->Bits(14, 10) == 16) {
+ const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
+ if ((instr->Mask(0x80180000) != 0) ||
+ (masked_A0DF8000 == 0x00020000) ||
+ (masked_A0DF8000 == 0x00030000) ||
+ (masked_A0DF8000 == 0x00068000) ||
+ (masked_A0DF8000 == 0x00428000) ||
+ (masked_A0DF8000 == 0x00430000) ||
+ (masked_A0DF8000 == 0x00468000) ||
+ (instr->Mask(0xA0D80000) == 0x00800000) ||
+ (instr->Mask(0xA0DE0000) == 0x00C00000) ||
+ (instr->Mask(0xA0DF0000) == 0x00C30000) ||
+ (instr->Mask(0xA0DC0000) == 0x00C40000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing1Source(instr);
+ }
+ } else if (instr->Bits(13, 10) == 8) {
+ if ((instr->Bits(15, 14) != 0) ||
+ (instr->Bits(2, 0) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPCompare(instr);
+ }
+ } else if (instr->Bits(12, 10) == 4) {
+ if ((instr->Bits(9, 5) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPImmediate(instr);
+ }
+ } else {
+ if (instr->Mask(0x80800000) != 0x00000000) {
+ VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 1: {
+ VisitFPConditionalCompare(instr);
+ break;
+ }
+ case 2: {
+ if ((instr->Bits(15, 14) == 0x3) ||
+ (instr->Mask(0x00009000) == 0x00009000) ||
+ (instr->Mask(0x0000A000) == 0x0000A000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing2Source(instr);
+ }
+ break;
+ }
+ case 3: {
+ VisitFPConditionalSelect(instr);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ }
+ }
+ }
+ } else {
+ // Bit 30 == 1 has been handled earlier.
+ ASSERT(instr->Bit(30) == 0);
+ if (instr->Mask(0xA0800000) != 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing3Source(instr);
+ }
+ }
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeAdvSIMDLoadStore(Instruction* instr) {
+ // TODO: Implement Advanced SIMD load/store instruction decode.
+ ASSERT(instr->Bits(29, 25) == 0x6);
+ VisitUnimplemented(instr);
+}
+
+
+void Decoder::DecodeAdvSIMDDataProcessing(Instruction* instr) {
+ // TODO: Implement Advanced SIMD data processing instruction decode.
+ ASSERT(instr->Bits(27, 25) == 0x7);
+ VisitUnimplemented(instr);
+}
+
+
+#define DEFINE_VISITOR_CALLERS(A) \
+ void Decoder::Visit##A(Instruction *instr) { \
+ ASSERT(instr->Mask(A##FMask) == A##Fixed); \
+ std::list<DecoderVisitor*>::iterator it; \
+ for (it = visitors_.begin(); it != visitors_.end(); it++) { \
+ (*it)->Visit##A(instr); \
+ } \
+ }
+VISITOR_LIST(DEFINE_VISITOR_CALLERS)
+#undef DEFINE_VISITOR_CALLERS
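+
+// For reference, each expansion of DEFINE_VISITOR_CALLERS above has this
+// shape (shown here for PCRelAddressing; the other visitors differ only in
+// the name):
+//
+//   void Decoder::VisitPCRelAddressing(Instruction *instr) {
+//     ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+//     std::list<DecoderVisitor*>::iterator it;
+//     for (it = visitors_.begin(); it != visitors_.end(); it++) {
+//       (*it)->VisitPCRelAddressing(instr);
+//     }
+//   }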
+} // namespace vixl
diff --git a/disas/libvixl/a64/decoder-a64.h b/disas/libvixl/a64/decoder-a64.h
new file mode 100644
index 0000000000..bbbbd81247
--- /dev/null
+++ b/disas/libvixl/a64/decoder-a64.h
@@ -0,0 +1,198 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_DECODER_A64_H_
+#define VIXL_A64_DECODER_A64_H_
+
+#include <list>
+
+#include "globals.h"
+#include "a64/instructions-a64.h"
+
+
+// List macro containing all visitors needed by the decoder class.
+
+#define VISITOR_LIST(V) \
+ V(PCRelAddressing) \
+ V(AddSubImmediate) \
+ V(LogicalImmediate) \
+ V(MoveWideImmediate) \
+ V(Bitfield) \
+ V(Extract) \
+ V(UnconditionalBranch) \
+ V(UnconditionalBranchToRegister) \
+ V(CompareBranch) \
+ V(TestBranch) \
+ V(ConditionalBranch) \
+ V(System) \
+ V(Exception) \
+ V(LoadStorePairPostIndex) \
+ V(LoadStorePairOffset) \
+ V(LoadStorePairPreIndex) \
+ V(LoadStorePairNonTemporal) \
+ V(LoadLiteral) \
+ V(LoadStoreUnscaledOffset) \
+ V(LoadStorePostIndex) \
+ V(LoadStorePreIndex) \
+ V(LoadStoreRegisterOffset) \
+ V(LoadStoreUnsignedOffset) \
+ V(LogicalShifted) \
+ V(AddSubShifted) \
+ V(AddSubExtended) \
+ V(AddSubWithCarry) \
+ V(ConditionalCompareRegister) \
+ V(ConditionalCompareImmediate) \
+ V(ConditionalSelect) \
+ V(DataProcessing1Source) \
+ V(DataProcessing2Source) \
+ V(DataProcessing3Source) \
+ V(FPCompare) \
+ V(FPConditionalCompare) \
+ V(FPConditionalSelect) \
+ V(FPImmediate) \
+ V(FPDataProcessing1Source) \
+ V(FPDataProcessing2Source) \
+ V(FPDataProcessing3Source) \
+ V(FPIntegerConvert) \
+ V(FPFixedPointConvert) \
+ V(Unallocated) \
+ V(Unimplemented)
+
+namespace vixl {
+
+// The Visitor interface. Disassemblers, simulators and other tools must
+// provide implementations for all of these functions.
+class DecoderVisitor {
+ public:
+ #define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ virtual ~DecoderVisitor() {}
+
+ private:
+ // Visitors are registered in a list.
+ std::list<DecoderVisitor*> visitors_;
+
+ friend class Decoder;
+};
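+
+
+// As an illustrative sketch (CountingVisitor is a hypothetical name, not part
+// of these sources), a concrete visitor can implement every required Visit*
+// method mechanically through VISITOR_LIST:
+//
+//   class CountingVisitor : public DecoderVisitor {
+//    public:
+//     CountingVisitor() : count_(0) {}
+//     #define DECLARE(A) \
+//       virtual void Visit##A(Instruction*) { count_++; }
+//     VISITOR_LIST(DECLARE)
+//     #undef DECLARE
+//     int count() const { return count_; }
+//    private:
+//     int count_;
+//   };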
+
+
+class Decoder: public DecoderVisitor {
+ public:
+ Decoder() {}
+
+ // Top-level instruction decoder function. Decodes an instruction and calls
+ // the visitor functions registered with the Decoder class.
+ void Decode(Instruction *instr);
+
+ // Register a new visitor class with the decoder.
+ // Decode() will call the corresponding visitor method from all registered
+ // visitor classes when decoding reaches the leaf node of the instruction
+ // decode tree.
+  // Visitors are called in the order in which they appear in the list.
+  // A visitor can only be registered once.
+  // Registering an already registered visitor updates its position in the
+  // list.
+ //
+ // d.AppendVisitor(V1);
+ // d.AppendVisitor(V2);
+  // d.PrependVisitor(V2); // Move V2 to the start of the list.
+ // d.InsertVisitorBefore(V3, V2);
+ // d.AppendVisitor(V4);
+ // d.AppendVisitor(V4); // No effect.
+ //
+ // d.Decode(i);
+ //
+  // will call the visitor methods of V3, V2, V1 and V4, in that order.
+ void AppendVisitor(DecoderVisitor* visitor);
+ void PrependVisitor(DecoderVisitor* visitor);
+ void InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+ void InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+
+ // Remove a previously registered visitor class from the list of visitors
+ // stored by the decoder.
+ void RemoveVisitor(DecoderVisitor* visitor);
+
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ // Decode the PC relative addressing instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x0.
+ void DecodePCRelAddressing(Instruction* instr);
+
+  // Decode the add/subtract immediate instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x1.
+ void DecodeAddSubImmediate(Instruction* instr);
+
+ // Decode the branch, system command, and exception generation parts of
+ // the instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
+ void DecodeBranchSystemException(Instruction* instr);
+
+ // Decode the load and store parts of the instruction tree, and call
+ // the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
+ void DecodeLoadStore(Instruction* instr);
+
+ // Decode the logical immediate and move wide immediate parts of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x2.
+ void DecodeLogical(Instruction* instr);
+
+ // Decode the bitfield and extraction parts of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x3.
+ void DecodeBitfieldExtract(Instruction* instr);
+
+ // Decode the data processing parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
+ void DecodeDataProcessing(Instruction* instr);
+
+ // Decode the floating point parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0xE, 0xF}.
+ void DecodeFP(Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 29:25 = 0x6.
+ void DecodeAdvSIMDLoadStore(Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) data processing part of the instruction
+ // tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:25 = 0x7.
+ void DecodeAdvSIMDDataProcessing(Instruction* instr);
+};
+} // namespace vixl
+
+#endif // VIXL_A64_DECODER_A64_H_
diff --git a/disas/libvixl/a64/disasm-a64.cc b/disas/libvixl/a64/disasm-a64.cc
new file mode 100644
index 0000000000..4a49748095
--- /dev/null
+++ b/disas/libvixl/a64/disasm-a64.cc
@@ -0,0 +1,1678 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "a64/disasm-a64.h"
+
+namespace vixl {
+
+Disassembler::Disassembler() {
+ buffer_size_ = 256;
+ buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
+ buffer_pos_ = 0;
+ own_buffer_ = true;
+}
+
+
+Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+ buffer_size_ = buffer_size;
+ buffer_ = text_buffer;
+ buffer_pos_ = 0;
+ own_buffer_ = false;
+}
+
+
+Disassembler::~Disassembler() {
+ if (own_buffer_) {
+ free(buffer_);
+ }
+}
+
+
+char* Disassembler::GetOutput() {
+ return buffer_;
+}
+
+
+void Disassembler::VisitAddSubImmediate(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+  bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
+                  (instr->ImmAddSub() == 0);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rns, 'IAddSub";
+ const char *form_cmp = "'Rns, 'IAddSub";
+ const char *form_mov = "'Rds, 'Rns";
+
+ switch (instr->Mask(AddSubImmediateMask)) {
+ case ADD_w_imm:
+ case ADD_x_imm: {
+ mnemonic = "add";
+ if (stack_op) {
+ mnemonic = "mov";
+ form = form_mov;
+ }
+ break;
+ }
+ case ADDS_w_imm:
+ case ADDS_x_imm: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_imm:
+ case SUB_x_imm: mnemonic = "sub"; break;
+ case SUBS_w_imm:
+ case SUBS_x_imm: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
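+// For example (illustrative): an ADD immediate with a zero immediate whose
+// Rd or Rn is the stack pointer is printed using its alias, e.g.
+// "mov sp, x5" rather than "add sp, x5, #0x0 (0)".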
+
+
+void Disassembler::VisitAddSubShifted(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'HDP";
+ const char *form_cmp = "'Rn, 'Rm'HDP";
+ const char *form_neg = "'Rd, 'Rm'HDP";
+
+ switch (instr->Mask(AddSubShiftedMask)) {
+ case ADD_w_shift:
+ case ADD_x_shift: mnemonic = "add"; break;
+ case ADDS_w_shift:
+ case ADDS_x_shift: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_shift:
+ case SUB_x_shift: {
+ mnemonic = "sub";
+ if (rn_is_zr) {
+ mnemonic = "neg";
+ form = form_neg;
+ }
+ break;
+ }
+ case SUBS_w_shift:
+ case SUBS_x_shift: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ } else if (rn_is_zr) {
+ mnemonic = "negs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubExtended(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ const char *mnemonic = "";
+ Extend mode = static_cast<Extend>(instr->ExtendMode());
+ const char *form = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
+ const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
+
+ switch (instr->Mask(AddSubExtendedMask)) {
+ case ADD_w_ext:
+ case ADD_x_ext: mnemonic = "add"; break;
+ case ADDS_w_ext:
+ case ADDS_x_ext: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_ext:
+ case SUB_x_ext: mnemonic = "sub"; break;
+ case SUBS_w_ext:
+ case SUBS_x_ext: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm";
+ const char *form_neg = "'Rd, 'Rm";
+
+ switch (instr->Mask(AddSubWithCarryMask)) {
+ case ADC_w:
+ case ADC_x: mnemonic = "adc"; break;
+ case ADCS_w:
+ case ADCS_x: mnemonic = "adcs"; break;
+ case SBC_w:
+ case SBC_x: {
+ mnemonic = "sbc";
+ if (rn_is_zr) {
+ mnemonic = "ngc";
+ form = form_neg;
+ }
+ break;
+ }
+ case SBCS_w:
+ case SBCS_x: {
+ mnemonic = "sbcs";
+ if (rn_is_zr) {
+ mnemonic = "ngcs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLogicalImmediate(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rn, 'ITri";
+
+ if (instr->ImmLogical() == 0) {
+ // The immediate encoded in the instruction is not in the expected format.
+ Format(instr, "unallocated", "(LogicalImmediate)");
+ return;
+ }
+
+ switch (instr->Mask(LogicalImmediateMask)) {
+ case AND_w_imm:
+ case AND_x_imm: mnemonic = "and"; break;
+ case ORR_w_imm:
+ case ORR_x_imm: {
+ mnemonic = "orr";
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+ : kWRegSize;
+ if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
+ mnemonic = "mov";
+ form = "'Rds, 'ITri";
+ }
+ break;
+ }
+ case EOR_w_imm:
+ case EOR_x_imm: mnemonic = "eor"; break;
+ case ANDS_w_imm:
+ case ANDS_x_imm: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'ITri";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+ ASSERT((reg_size == kXRegSize) ||
+ ((reg_size == kWRegSize) && (value <= 0xffffffff)));
+
+ // Test for movz: 16 bits set at positions 0, 16, 32 or 48.
+ if (((value & 0xffffffffffff0000UL) == 0UL) ||
+ ((value & 0xffffffff0000ffffUL) == 0UL) ||
+ ((value & 0xffff0000ffffffffUL) == 0UL) ||
+ ((value & 0x0000ffffffffffffUL) == 0UL)) {
+ return true;
+ }
+
+ // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48).
+ if ((reg_size == kXRegSize) &&
+ (((value & 0xffffffffffff0000UL) == 0xffffffffffff0000UL) ||
+ ((value & 0xffffffff0000ffffUL) == 0xffffffff0000ffffUL) ||
+ ((value & 0xffff0000ffffffffUL) == 0xffff0000ffffffffUL) ||
+ ((value & 0x0000ffffffffffffUL) == 0x0000ffffffffffffUL))) {
+ return true;
+ }
+ if ((reg_size == kWRegSize) &&
+ (((value & 0xffff0000) == 0xffff0000) ||
+ ((value & 0x0000ffff) == 0x0000ffff))) {
+ return true;
+ }
+ return false;
+}
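+// For illustration, with example values chosen here (not from the original
+// sources):
+//   IsMovzMovnImm(kXRegSize, 0x0000ffff00000000UL)  -> true   (movz, lsl #32)
+//   IsMovzMovnImm(kXRegSize, 0xffffffff0000ffffUL)  -> true   (movn pattern)
+//   IsMovzMovnImm(kWRegSize, 0x00012345)            -> false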
+
+
+void Disassembler::VisitLogicalShifted(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'HLo";
+
+ switch (instr->Mask(LogicalShiftedMask)) {
+ case AND_w:
+ case AND_x: mnemonic = "and"; break;
+ case BIC_w:
+ case BIC_x: mnemonic = "bic"; break;
+ case EOR_w:
+ case EOR_x: mnemonic = "eor"; break;
+ case EON_w:
+ case EON_x: mnemonic = "eon"; break;
+ case BICS_w:
+ case BICS_x: mnemonic = "bics"; break;
+ case ANDS_w:
+ case ANDS_x: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'Rm'HLo";
+ }
+ break;
+ }
+ case ORR_w:
+ case ORR_x: {
+ mnemonic = "orr";
+ if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
+ mnemonic = "mov";
+ form = "'Rd, 'Rm";
+ }
+ break;
+ }
+ case ORN_w:
+ case ORN_x: {
+ mnemonic = "orn";
+ if (rn_is_zr) {
+ mnemonic = "mvn";
+ form = "'Rd, 'Rm'HLo";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareRegisterMask)) {
+ case CCMN_w:
+ case CCMN_x: mnemonic = "ccmn"; break;
+ case CCMP_w:
+ case CCMP_x: mnemonic = "ccmp"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareImmediateMask)) {
+ case CCMN_w_imm:
+ case CCMN_x_imm: mnemonic = "ccmn"; break;
+ case CCMP_w_imm:
+ case CCMP_x_imm: mnemonic = "ccmp"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalSelect(Instruction* instr) {
+ bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
+ bool rn_is_rm = (instr->Rn() == instr->Rm());
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
+ const char *form_test = "'Rd, 'CInv";
+ const char *form_update = "'Rd, 'Rn, 'CInv";
+
+ Condition cond = static_cast<Condition>(instr->Condition());
+ bool invertible_cond = (cond != al) && (cond != nv);
+
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: mnemonic = "csel"; break;
+ case CSINC_w:
+ case CSINC_x: {
+ mnemonic = "csinc";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "cset";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinc";
+ form = form_update;
+ }
+ break;
+ }
+ case CSINV_w:
+ case CSINV_x: {
+ mnemonic = "csinv";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "csetm";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinv";
+ form = form_update;
+ }
+ break;
+ }
+ case CSNEG_w:
+ case CSNEG_x: {
+ mnemonic = "csneg";
+ if (rn_is_rm && invertible_cond) {
+ mnemonic = "cneg";
+ form = form_update;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
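+// For example (illustrative): "csinc x0, xzr, xzr, ne" is printed via the
+// alias path above as "cset x0, eq" -- the condition shown is the inverse of
+// the encoded one.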
+
+
+void Disassembler::VisitBitfield(Instruction* instr) {
+ unsigned s = instr->ImmS();
+ unsigned r = instr->ImmR();
+ unsigned rd_size_minus_1 =
+ ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1;
+ const char *mnemonic = "";
+ const char *form = "";
+ const char *form_shift_right = "'Rd, 'Rn, 'IBr";
+ const char *form_extend = "'Rd, 'Wn";
+ const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
+ const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
+ const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
+
+ switch (instr->Mask(BitfieldMask)) {
+ case SBFM_w:
+ case SBFM_x: {
+ mnemonic = "sbfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "sxtb";
+ } else if (s == 15) {
+ mnemonic = "sxth";
+ } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
+ mnemonic = "sxtw";
+ } else {
+ form = form_bfx;
+ }
+ } else if (s == rd_size_minus_1) {
+ mnemonic = "asr";
+ form = form_shift_right;
+ } else if (s < r) {
+ mnemonic = "sbfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case UBFM_w:
+ case UBFM_x: {
+ mnemonic = "ubfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "uxtb";
+ } else if (s == 15) {
+ mnemonic = "uxth";
+ } else {
+ form = form_bfx;
+ }
+ }
+ if (s == rd_size_minus_1) {
+ mnemonic = "lsr";
+ form = form_shift_right;
+ } else if (r == s + 1) {
+ mnemonic = "lsl";
+ form = form_lsl;
+ } else if (s < r) {
+ mnemonic = "ubfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case BFM_w:
+ case BFM_x: {
+ mnemonic = "bfxil";
+ form = form_bfx;
+ if (s < r) {
+ mnemonic = "bfi";
+ form = form_bfiz;
+ }
+ }
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitExtract(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
+
+ switch (instr->Mask(ExtractMask)) {
+ case EXTR_w:
+ case EXTR_x: {
+ if (instr->Rn() == instr->Rm()) {
+ mnemonic = "ror";
+ form = "'Rd, 'Rn, 'IExtract";
+ } else {
+ mnemonic = "extr";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitPCRelAddressing(Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
+ // ADRP is not implemented.
+ default: Format(instr, "unimplemented", "(PCRelAddressing)");
+ }
+}
+
+
+void Disassembler::VisitConditionalBranch(Instruction* instr) {
+ switch (instr->Mask(ConditionalBranchMask)) {
+ case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Disassembler::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Xn";
+
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BR: mnemonic = "br"; break;
+ case BLR: mnemonic = "blr"; break;
+ case RET: {
+ mnemonic = "ret";
+ if (instr->Rn() == kLinkRegCode) {
+ form = NULL;
+ }
+ break;
+ }
+ default: form = "(UnconditionalBranchToRegister)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'BImmUncn";
+
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case B: mnemonic = "b"; break;
+ case BL: mnemonic = "bl"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn";
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(RBIT, "rbit");
+ FORMAT(REV16, "rev16");
+ FORMAT(REV, "rev");
+ FORMAT(CLZ, "clz");
+ FORMAT(CLS, "cls");
+ #undef FORMAT
+ case REV32_x: mnemonic = "rev32"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing2Source(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Rd, 'Rn, 'Rm";
+
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(UDIV, "udiv");
+ FORMAT(SDIV, "sdiv");
+ FORMAT(LSLV, "lsl");
+ FORMAT(LSRV, "lsr");
+ FORMAT(ASRV, "asr");
+ FORMAT(RORV, "ror");
+ #undef FORMAT
+ default: form = "(DataProcessing2Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
+ bool ra_is_zr = RaIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
+ const char *form_rrr = "'Rd, 'Rn, 'Rm";
+ const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
+ const char *form_xww = "'Xd, 'Wn, 'Wm";
+ const char *form_xxx = "'Xd, 'Xn, 'Xm";
+
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x: {
+ mnemonic = "madd";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mul";
+ form = form_rrr;
+ }
+ break;
+ }
+ case MSUB_w:
+ case MSUB_x: {
+ mnemonic = "msub";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mneg";
+ form = form_rrr;
+ }
+ break;
+ }
+ case SMADDL_x: {
+ mnemonic = "smaddl";
+ if (ra_is_zr) {
+ mnemonic = "smull";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMSUBL_x: {
+ mnemonic = "smsubl";
+ if (ra_is_zr) {
+ mnemonic = "smnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMADDL_x: {
+ mnemonic = "umaddl";
+ if (ra_is_zr) {
+ mnemonic = "umull";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMSUBL_x: {
+ mnemonic = "umsubl";
+ if (ra_is_zr) {
+ mnemonic = "umnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMULH_x: {
+ mnemonic = "smulh";
+ form = form_xxx;
+ break;
+ }
+ case UMULH_x: {
+ mnemonic = "umulh";
+ form = form_xxx;
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCompareBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rt, 'BImmCmpa";
+
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w:
+ case CBZ_x: mnemonic = "cbz"; break;
+ case CBNZ_w:
+ case CBNZ_x: mnemonic = "cbnz"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitTestBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ // If the top bit of the immediate is clear, the tested register is
+ // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
+ // encoded in bit 31 of the instruction, we can reuse the Rt form, which
+ // uses bit 31 (normally "sf") to choose the register size.
+ const char *form = "'Rt, 'IS, 'BImmTest";
+
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: mnemonic = "tbz"; break;
+ case TBNZ: mnemonic = "tbnz"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
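+// For example (illustrative encodings): a tested bit below 32 prints as, say,
+// "tbz w2, #3, #+0x40", while bit 35 (top bit of the immediate set) prints
+// with an X register: "tbz x2, #35, #+0x40".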
+
+
+void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'IMoveImm";
+
+ // Print the shift separately for movk, to make it clear which half word will
+ // be overwritten. Movn and movz print the computed immediate, which includes
+ // shift calculation.
+ switch (instr->Mask(MoveWideImmediateMask)) {
+ case MOVN_w:
+ case MOVN_x: mnemonic = "movn"; break;
+ case MOVZ_w:
+ case MOVZ_x: mnemonic = "movz"; break;
+ case MOVK_w:
+ case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
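+// For example (illustrative): imm16 0x1234 with a shift of 16 prints as
+// "movz x0, #0x12340000" for movz, but as "movk x0, #0x1234, lsl #16" for
+// movk.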
+
+
+#define LOAD_STORE_LIST(V) \
+ V(STRB_w, "strb", "'Wt") \
+ V(STRH_w, "strh", "'Wt") \
+ V(STR_w, "str", "'Wt") \
+ V(STR_x, "str", "'Xt") \
+ V(LDRB_w, "ldrb", "'Wt") \
+ V(LDRH_w, "ldrh", "'Wt") \
+ V(LDR_w, "ldr", "'Wt") \
+ V(LDR_x, "ldr", "'Xt") \
+ V(LDRSB_x, "ldrsb", "'Xt") \
+ V(LDRSH_x, "ldrsh", "'Xt") \
+ V(LDRSW_x, "ldrsw", "'Xt") \
+ V(LDRSB_w, "ldrsb", "'Wt") \
+ V(LDRSH_w, "ldrsh", "'Wt") \
+ V(STR_s, "str", "'St") \
+ V(STR_d, "str", "'Dt") \
+ V(LDR_s, "ldr", "'St") \
+ V(LDR_d, "ldr", "'Dt")
+
+void Disassembler::VisitLoadStorePreIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePreIndex)";
+
+ switch (instr->Mask(LoadStorePreIndexMask)) {
+ #define LS_PREINDEX(A, B, C) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
+ LOAD_STORE_LIST(LS_PREINDEX)
+ #undef LS_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePostIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePostIndex)";
+
+ switch (instr->Mask(LoadStorePostIndexMask)) {
+ #define LS_POSTINDEX(A, B, C) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
+ LOAD_STORE_LIST(LS_POSTINDEX)
+ #undef LS_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreUnsignedOffset)";
+
+ switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
+ #define LS_UNSIGNEDOFFSET(A, B, C) \
+ case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
+ LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
+ #undef LS_UNSIGNEDOFFSET
+    case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreRegisterOffset)";
+
+ switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
+ #define LS_REGISTEROFFSET(A, B, C) \
+ case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
+ LOAD_STORE_LIST(LS_REGISTEROFFSET)
+ #undef LS_REGISTEROFFSET
+ case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Wt, ['Xns'ILS]";
+ const char *form_x = "'Xt, ['Xns'ILS]";
+ const char *form_s = "'St, ['Xns'ILS]";
+ const char *form_d = "'Dt, ['Xns'ILS]";
+
+ switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
+ case STURB_w: mnemonic = "sturb"; break;
+ case STURH_w: mnemonic = "sturh"; break;
+ case STUR_w: mnemonic = "stur"; break;
+ case STUR_x: mnemonic = "stur"; form = form_x; break;
+ case STUR_s: mnemonic = "stur"; form = form_s; break;
+ case STUR_d: mnemonic = "stur"; form = form_d; break;
+ case LDURB_w: mnemonic = "ldurb"; break;
+ case LDURH_w: mnemonic = "ldurh"; break;
+ case LDUR_w: mnemonic = "ldur"; break;
+ case LDUR_x: mnemonic = "ldur"; form = form_x; break;
+ case LDUR_s: mnemonic = "ldur"; form = form_s; break;
+ case LDUR_d: mnemonic = "ldur"; form = form_d; break;
+ case LDURSB_x: form = form_x; // Fall through.
+ case LDURSB_w: mnemonic = "ldursb"; break;
+ case LDURSH_x: form = form_x; // Fall through.
+ case LDURSH_w: mnemonic = "ldursh"; break;
+ case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+ default: form = "(LoadStoreUnscaledOffset)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadLiteral(Instruction* instr) {
+ const char *mnemonic = "ldr";
+ const char *form = "(LoadLiteral)";
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
+ case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
+ case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
+ case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
+ default: mnemonic = "unimplemented";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_PAIR_LIST(V) \
+ V(STP_w, "stp", "'Wt, 'Wt2", "4") \
+ V(LDP_w, "ldp", "'Wt, 'Wt2", "4") \
+ V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "4") \
+ V(STP_x, "stp", "'Xt, 'Xt2", "8") \
+ V(LDP_x, "ldp", "'Xt, 'Xt2", "8") \
+ V(STP_s, "stp", "'St, 'St2", "4") \
+ V(LDP_s, "ldp", "'St, 'St2", "4") \
+ V(STP_d, "stp", "'Dt, 'Dt2", "8") \
+ V(LDP_d, "ldp", "'Dt, 'Dt2", "8")
+
+void Disassembler::VisitLoadStorePairPostIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPostIndex)";
+
+ switch (instr->Mask(LoadStorePairPostIndexMask)) {
+ #define LSP_POSTINDEX(A, B, C, D) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
+ LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
+ #undef LSP_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairPreIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPreIndex)";
+
+ switch (instr->Mask(LoadStorePairPreIndexMask)) {
+ #define LSP_PREINDEX(A, B, C, D) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
+ LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
+ #undef LSP_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairOffset)";
+
+ switch (instr->Mask(LoadStorePairOffsetMask)) {
+ #define LSP_OFFSET(A, B, C, D) \
+ case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
+ LOAD_STORE_PAIR_LIST(LSP_OFFSET)
+ #undef LSP_OFFSET
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form;
+
+ switch (instr->Mask(LoadStorePairNonTemporalMask)) {
+ case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+ case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+ case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+ case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+ case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+ case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+ case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+ case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+ default: form = "(LoadStorePairNonTemporal)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPCompare(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm";
+ const char *form_zero = "'Fn, #0.0";
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s_zero:
+ case FCMP_d_zero: form = form_zero; // Fall through.
+ case FCMP_s:
+ case FCMP_d: mnemonic = "fcmp"; break;
+ default: form = "(FPCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalCompare(Instruction* instr) {
+ const char *mnemonic = "unmplemented";
+ const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: mnemonic = "fccmp"; break;
+ case FCCMPE_s:
+ case FCCMPE_d: mnemonic = "fccmpe"; break;
+ default: form = "(FPConditionalCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s:
+ case FCSEL_d: mnemonic = "fcsel"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing1Source(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fd, 'Fn";
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMOV, "fmov");
+ FORMAT(FABS, "fabs");
+ FORMAT(FNEG, "fneg");
+ FORMAT(FSQRT, "fsqrt");
+ FORMAT(FRINTN, "frintn");
+ FORMAT(FRINTP, "frintp");
+ FORMAT(FRINTM, "frintm");
+ FORMAT(FRINTZ, "frintz");
+ FORMAT(FRINTA, "frinta");
+ FORMAT(FRINTX, "frintx");
+ FORMAT(FRINTI, "frinti");
+ #undef FORMAT
+ case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
+ case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+ default: form = "(FPDataProcessing1Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm";
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMUL, "fmul");
+ FORMAT(FDIV, "fdiv");
+ FORMAT(FADD, "fadd");
+ FORMAT(FSUB, "fsub");
+ FORMAT(FMAX, "fmax");
+ FORMAT(FMIN, "fmin");
+ FORMAT(FMAXNM, "fmaxnm");
+ FORMAT(FMINNM, "fminnm");
+ FORMAT(FNMUL, "fnmul");
+ #undef FORMAT
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMADD, "fmadd");
+ FORMAT(FMSUB, "fmsub");
+ FORMAT(FNMADD, "fnmadd");
+ FORMAT(FNMSUB, "fnmsub");
+ #undef FORMAT
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "(FPImmediate)";
+
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
+ case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(FPIntegerConvert)";
+ const char *form_rf = "'Rd, 'Fn";
+ const char *form_fr = "'Fd, 'Rn";
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FMOV_ws:
+ case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
+ case FMOV_sw:
+ case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+ case FCVTMS_ws:
+ case FCVTMS_xs:
+ case FCVTMS_wd:
+ case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
+ case FCVTMU_ws:
+ case FCVTMU_xs:
+ case FCVTMU_wd:
+ case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
+ case FCVTNS_ws:
+ case FCVTNS_xs:
+ case FCVTNS_wd:
+ case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
+ case FCVTNU_ws:
+ case FCVTNU_xs:
+ case FCVTNU_wd:
+ case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
+ case FCVTZU_xd:
+ case FCVTZU_ws:
+ case FCVTZU_wd:
+ case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
+ case FCVTZS_xd:
+ case FCVTZS_wd:
+ case FCVTZS_xs:
+ case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+ case SCVTF_sw:
+ case SCVTF_sx:
+ case SCVTF_dw:
+ case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw:
+ case UCVTF_sx:
+ case UCVTF_dw:
+ case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Fn, 'IFPFBits";
+ const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ case FCVTZS_ws_fixed:
+ case FCVTZS_xs_fixed:
+ case FCVTZS_wd_fixed:
+ case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
+ case FCVTZU_ws_fixed:
+ case FCVTZU_xs_fixed:
+ case FCVTZU_wd_fixed:
+ case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
+ case SCVTF_sw_fixed:
+ case SCVTF_sx_fixed:
+ case SCVTF_dw_fixed:
+ case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw_fixed:
+ case UCVTF_sx_fixed:
+ case UCVTF_dw_fixed:
+ case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitSystem(Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ const char *mnemonic = "unimplemented";
+ const char *form = "(System)";
+
+ if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ mnemonic = "mrs";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "'Xt, nzcv"; break;
+ case FPCR: form = "'Xt, fpcr"; break;
+ default: form = "'Xt, (unknown)"; break;
+ }
+ break;
+ }
+ case MSR: {
+ mnemonic = "msr";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "nzcv, 'Xt"; break;
+ case FPCR: form = "fpcr, 'Xt"; break;
+ default: form = "(unknown), 'Xt"; break;
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: {
+ mnemonic = "nop";
+ form = NULL;
+ break;
+ }
+ }
+ }
+
+ Format(instr, mnemonic, form);
+}
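+// For example (illustrative): an MRS reading NZCV prints as "mrs x0, nzcv",
+// while a HINT encoding NOP prints as plain "nop" (form is NULL, so Format()
+// emits the mnemonic alone).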
+
+
+void Disassembler::VisitException(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'IDebug";
+
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: mnemonic = "hlt"; break;
+ case BRK: mnemonic = "brk"; break;
+ case SVC: mnemonic = "svc"; break;
+ case HVC: mnemonic = "hvc"; break;
+ case SMC: mnemonic = "smc"; break;
+ case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
+ case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
+ case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
+ default: form = "(Exception)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnimplemented(Instruction* instr) {
+ Format(instr, "unimplemented", "(Unimplemented)");
+}
+
+
+void Disassembler::VisitUnallocated(Instruction* instr) {
+ Format(instr, "unallocated", "(Unallocated)");
+}
+
+
+void Disassembler::ProcessOutput(Instruction* /*instr*/) {
+  // The base disassembler does nothing more than disassemble into a buffer.
+}
+
+
+void Disassembler::Format(Instruction* instr, const char* mnemonic,
+ const char* format) {
+ ASSERT(mnemonic != NULL);
+ ResetOutput();
+ Substitute(instr, mnemonic);
+ if (format != NULL) {
+ buffer_[buffer_pos_++] = ' ';
+ Substitute(instr, format);
+ }
+ buffer_[buffer_pos_] = 0;
+ ProcessOutput(instr);
+}
+
+
+void Disassembler::Substitute(Instruction* instr, const char* string) {
+ char chr = *string++;
+ while (chr != '\0') {
+ if (chr == '\'') {
+ string += SubstituteField(instr, string);
+ } else {
+ buffer_[buffer_pos_++] = chr;
+ }
+ chr = *string++;
+ }
+}
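+// For example, the form "'Rds, 'Rns, 'IAddSub" used by VisitAddSubImmediate
+// is processed by copying the literal ", " characters and dispatching the
+// 'Rds, 'Rns and 'IAddSub directives to SubstituteField(), yielding, say,
+// "x0, sp, #0x1 (1)". Each Substitute*Field() helper returns the number of
+// format characters it consumed, so that Substitute() can skip past them.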
+
+
+int Disassembler::SubstituteField(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'R': // Register. X or W, selected by sf bit.
+ case 'F': // FP Register. S or D, selected by type field.
+ case 'W':
+ case 'X':
+ case 'S':
+ case 'D': return SubstituteRegisterField(instr, format);
+ case 'I': return SubstituteImmediateField(instr, format);
+ case 'L': return SubstituteLiteralField(instr, format);
+ case 'H': return SubstituteShiftField(instr, format);
+ case 'P': return SubstitutePrefetchField(instr, format);
+ case 'C': return SubstituteConditionField(instr, format);
+ case 'E': return SubstituteExtendField(instr, format);
+ case 'A': return SubstitutePCRelAddressField(instr, format);
+ case 'B': return SubstituteBranchTargetField(instr, format);
+ case 'O': return SubstituteLSRegOffsetField(instr, format);
+ default: {
+ UNREACHABLE();
+ return 1;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteRegisterField(Instruction* instr,
+ const char* format) {
+ unsigned reg_num = 0;
+ unsigned field_len = 2;
+ switch (format[1]) {
+ case 'd': reg_num = instr->Rd(); break;
+ case 'n': reg_num = instr->Rn(); break;
+ case 'm': reg_num = instr->Rm(); break;
+ case 'a': reg_num = instr->Ra(); break;
+ case 't': {
+ if (format[2] == '2') {
+ reg_num = instr->Rt2();
+ field_len = 3;
+ } else {
+ reg_num = instr->Rt();
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ // Increase field length for registers tagged as stack.
+ if (format[2] == 's') {
+ field_len = 3;
+ }
+
+ char reg_type;
+ if (format[0] == 'R') {
+ // Register type is R: use sf bit to choose X and W.
+ reg_type = instr->SixtyFourBits() ? 'x' : 'w';
+ } else if (format[0] == 'F') {
+ // Floating-point register: use type field to choose S or D.
+ reg_type = ((instr->FPType() & 1) == 0) ? 's' : 'd';
+ } else {
+ // Register type is specified. Make it lower case.
+ reg_type = format[0] + 0x20;
+ }
+
+ if ((reg_num != kZeroRegCode) || (reg_type == 's') || (reg_type == 'd')) {
+ // A normal register: w0 - w30, x0 - x30, s0 - s31, d0 - d31.
+ AppendToOutput("%c%d", reg_type, reg_num);
+ } else if (format[2] == 's') {
+ // Disassemble w31/x31 as stack pointer wsp/sp.
+ AppendToOutput("%s", (reg_type == 'w') ? "wsp" : "sp");
+ } else {
+ // Disassemble w31/x31 as zero register wzr/xzr.
+ AppendToOutput("%czr", reg_type);
+ }
+
+ return field_len;
+}
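+// For example (illustrative): with format "'Rds" and register number 31, a
+// 64-bit instruction prints "sp", whereas format "'Rd" prints the same
+// register number as "xzr".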
+
+
+int Disassembler::SubstituteImmediateField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'I');
+
+ switch (format[1]) {
+ case 'M': { // IMoveImm or IMoveLSL.
+ if (format[5] == 'I') {
+ uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
+ AppendToOutput("#0x%" PRIx64, imm);
+ } else {
+ ASSERT(format[5] == 'L');
+ AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
+ if (instr->ShiftMoveWide() > 0) {
+ AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
+ }
+ }
+ return 8;
+ }
+ case 'L': {
+ switch (format[2]) {
+ case 'L': { // ILLiteral - Immediate Load Literal.
+ AppendToOutput("pc%+" PRId64,
+ instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+ return 9;
+ }
+ case 'S': { // ILS - Immediate Load/Store.
+ if (instr->ImmLS() != 0) {
+ AppendToOutput(", #%" PRId64, instr->ImmLS());
+ }
+ return 3;
+ }
+ case 'P': { // ILPx - Immediate Load/Store Pair, x = access size.
+ if (instr->ImmLSPair() != 0) {
+ // format[3] is the scale value. Convert to a number.
+ int scale = format[3] - 0x30;
+ AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
+ }
+ return 4;
+ }
+ case 'U': { // ILU - Immediate Load/Store Unsigned.
+ if (instr->ImmLSUnsigned() != 0) {
+ AppendToOutput(", #%" PRIu64,
+ instr->ImmLSUnsigned() << instr->SizeLS());
+ }
+ return 3;
+ }
+ }
+ }
+ case 'C': { // ICondB - Immediate Conditional Branch.
+ int64_t offset = instr->ImmCondBranch() << 2;
+ char sign = (offset >= 0) ? '+' : '-';
+ AppendToOutput("#%c0x%" PRIx64, sign, offset);
+ return 6;
+ }
+ case 'A': { // IAddSub.
+ ASSERT(instr->ShiftAddSub() <= 1);
+ int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
+ AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
+ return 7;
+ }
+ case 'F': { // IFPSingle, IFPDouble or IFPFBits.
+ if (format[3] == 'F') { // IFPFbits.
+ AppendToOutput("#%d", 64 - instr->FPScale());
+ return 8;
+ } else {
+ AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
+ format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
+ return 9;
+ }
+ }
+ case 'T': { // ITri - Immediate Triangular Encoded.
+ AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
+ return 4;
+ }
+ case 'N': { // INzcv.
+ int nzcv = (instr->Nzcv() << Flags_offset);
+ AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
+ ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
+ ((nzcv & CFlag) == 0) ? 'c' : 'C',
+ ((nzcv & VFlag) == 0) ? 'v' : 'V');
+ return 5;
+ }
+ case 'P': { // IP - Conditional compare.
+ AppendToOutput("#%d", instr->ImmCondCmp());
+ return 2;
+ }
+ case 'B': { // Bitfields.
+ return SubstituteBitfieldImmediateField(instr, format);
+ }
+ case 'E': { // IExtract.
+ AppendToOutput("#%d", instr->ImmS());
+ return 8;
+ }
+ case 'S': { // IS - Test and branch bit.
+ AppendToOutput("#%d", (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40());
+ return 2;
+ }
+ case 'D': { // IDebug - HLT and BRK instructions.
+ AppendToOutput("#0x%x", instr->ImmException());
+ return 6;
+ }
+ default: {
+ UNIMPLEMENTED();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
+ const char* format) {
+ ASSERT((format[0] == 'I') && (format[1] == 'B'));
+ unsigned r = instr->ImmR();
+ unsigned s = instr->ImmS();
+
+ switch (format[2]) {
+ case 'r': { // IBr.
+ AppendToOutput("#%d", r);
+ return 3;
+ }
+ case 's': { // IBs+1 or IBs-r+1.
+ if (format[3] == '+') {
+ AppendToOutput("#%d", s + 1);
+ return 5;
+ } else {
+ ASSERT(format[3] == '-');
+ AppendToOutput("#%d", s - r + 1);
+ return 7;
+ }
+ }
+ case 'Z': { // IBZ-r.
+ ASSERT((format[3] == '-') && (format[4] == 'r'));
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
+ AppendToOutput("#%d", reg_size - r);
+ return 5;
+ }
+ default: {
+ UNREACHABLE();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteLiteralField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "LValue", 6) == 0);
+ USE(format);
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit:
+ case LDR_x_lit:
+ case LDR_s_lit:
+ case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
+ default: UNREACHABLE();
+ }
+
+ return 6;
+}
+
+
+int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
+ ASSERT(format[0] == 'H');
+ ASSERT(instr->ShiftDP() <= 0x3);
+
+ switch (format[1]) {
+ case 'D': { // HDP.
+ ASSERT(instr->ShiftDP() != ROR);
+ } // Fall through.
+ case 'L': { // HLo.
+ if (instr->ImmDPShift() != 0) {
+ const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
+ AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
+ instr->ImmDPShift());
+ }
+ return 3;
+ }
+ default:
+ UNIMPLEMENTED();
+ return 0;
+ }
+}
+
+
+int Disassembler::SubstituteConditionField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'C');
+ const char* condition_code[] = { "eq", "ne", "hs", "lo",
+ "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt",
+ "gt", "le", "al", "nv" };
+ int cond;
+ switch (format[1]) {
+ case 'B': cond = instr->ConditionBranch(); break;
+ case 'I': {
+ cond = InvertCondition(static_cast<Condition>(instr->Condition()));
+ break;
+ }
+ default: cond = instr->Condition();
+ }
+ AppendToOutput("%s", condition_code[cond]);
+ return 4;
+}
+
+
+int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
+ const char* format) {
+ USE(format);
+ ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
+
+ int offset = instr->ImmPCRel();
+
+ // Only ADR (AddrPCRelByte) is supported.
+ ASSERT(strcmp(format, "AddrPCRelByte") == 0);
+
+ char sign = '+';
+ if (offset < 0) {
+ offset = -offset;
+ sign = '-';
+ }
+ // TODO: Extend this to support printing the target address.
+ AppendToOutput("#%c0x%x", sign, offset);
+ return 13;
+}
+
+
+int Disassembler::SubstituteBranchTargetField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "BImm", 4) == 0);
+
+ int64_t offset = 0;
+ switch (format[5]) {
+ // BImmUncn - unconditional branch immediate.
+ case 'n': offset = instr->ImmUncondBranch(); break;
+ // BImmCond - conditional branch immediate.
+ case 'o': offset = instr->ImmCondBranch(); break;
+ // BImmCmpa - compare and branch immediate.
+ case 'm': offset = instr->ImmCmpBranch(); break;
+ // BImmTest - test and branch immediate.
+ case 'e': offset = instr->ImmTestBranch(); break;
+ default: UNIMPLEMENTED();
+ }
+ offset <<= kInstructionSizeLog2;
+ char sign = '+';
+ if (offset < 0) {
+ offset = -offset;
+ sign = '-';
+ }
+ AppendToOutput("#%c0x%" PRIx64, sign, offset);
+ return 8;
+}
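+// Worked example (illustrative): a conditional branch with an ImmCondBranch
+// of 0x10 is scaled by kInstructionSizeLog2 (2) to a byte offset of 0x40 and
+// printed as "#+0x40"; branch immediates count instructions, not bytes.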
+
+
+int Disassembler::SubstituteExtendField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "Ext", 3) == 0);
+ ASSERT(instr->ExtendMode() <= 7);
+ USE(format);
+
+ const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
+ "sxtb", "sxth", "sxtw", "sxtx" };
+
+ // If rd or rn is SP (register code 31), uxtw on 32-bit registers and uxtx
+ // on 64-bit registers becomes lsl.
+ if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
+ (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
+ (instr->ExtendMode() == UXTX))) {
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(", lsl #%d", instr->ImmExtendShift());
+ }
+ } else {
+ AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(" #%d", instr->ImmExtendShift());
+ }
+ }
+ return 3;
+}
+
+
+int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+ const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
+ "undefined", "undefined", "sxtw", "sxtx" };
+ USE(format);
+
+ unsigned shift = instr->ImmShiftLS();
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
+
+ unsigned rm = instr->Rm();
+ if (rm == kZeroRegCode) {
+ AppendToOutput("%czr", reg_type);
+ } else {
+ AppendToOutput("%c%d", reg_type, rm);
+ }
+
+ // Extend mode UXTX is an alias for shift mode LSL here.
+ if (!((ext == UXTX) && (shift == 0))) {
+ AppendToOutput(", %s", extend_mode[ext]);
+ if (shift != 0) {
+ AppendToOutput(" #%d", instr->SizeLS());
+ }
+ }
+ return 9;
+}
+
+
+int Disassembler::SubstitutePrefetchField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'P');
+ USE(format);
+
+ int prefetch_mode = instr->PrefetchMode();
+
+ const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
+ int level = ((prefetch_mode >> 1) & 0x3) + 1;
+ const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
+
+ AppendToOutput("p%sl%d%s", ls, level, ks);
+ return 6;
+}
+
+
+void Disassembler::ResetOutput() {
+ buffer_pos_ = 0;
+ buffer_[buffer_pos_] = 0;
+}
+
+
+void Disassembler::AppendToOutput(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_,
+ format, args);
+ va_end(args);
+}
+
+
+void PrintDisassembler::ProcessOutput(Instruction* instr) {
+ fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n",
+ reinterpret_cast<uint64_t>(instr),
+ instr->InstructionBits(),
+ GetOutput());
+}
+} // namespace vixl
diff --git a/disas/libvixl/a64/disasm-a64.h b/disas/libvixl/a64/disasm-a64.h
new file mode 100644
index 0000000000..857a5acac4
--- /dev/null
+++ b/disas/libvixl/a64/disasm-a64.h
@@ -0,0 +1,109 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_DISASM_A64_H
+#define VIXL_A64_DISASM_A64_H
+
+#include "globals.h"
+#include "utils.h"
+#include "instructions-a64.h"
+#include "decoder-a64.h"
+
+namespace vixl {
+
+class Disassembler: public DecoderVisitor {
+ public:
+ Disassembler();
+ Disassembler(char* text_buffer, int buffer_size);
+ virtual ~Disassembler();
+ char* GetOutput();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ protected:
+ virtual void ProcessOutput(Instruction* instr);
+
+ private:
+ void Format(Instruction* instr, const char* mnemonic, const char* format);
+ void Substitute(Instruction* instr, const char* string);
+ int SubstituteField(Instruction* instr, const char* format);
+ int SubstituteRegisterField(Instruction* instr, const char* format);
+ int SubstituteImmediateField(Instruction* instr, const char* format);
+ int SubstituteLiteralField(Instruction* instr, const char* format);
+ int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
+ int SubstituteShiftField(Instruction* instr, const char* format);
+ int SubstituteExtendField(Instruction* instr, const char* format);
+ int SubstituteConditionField(Instruction* instr, const char* format);
+ int SubstitutePCRelAddressField(Instruction* instr, const char* format);
+ int SubstituteBranchTargetField(Instruction* instr, const char* format);
+ int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
+ int SubstitutePrefetchField(Instruction* instr, const char* format);
+
+ inline bool RdIsZROrSP(Instruction* instr) const {
+ return (instr->Rd() == kZeroRegCode);
+ }
+
+ inline bool RnIsZROrSP(Instruction* instr) const {
+ return (instr->Rn() == kZeroRegCode);
+ }
+
+ inline bool RmIsZROrSP(Instruction* instr) const {
+ return (instr->Rm() == kZeroRegCode);
+ }
+
+ inline bool RaIsZROrSP(Instruction* instr) const {
+ return (instr->Ra() == kZeroRegCode);
+ }
+
+ bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
+
+ void ResetOutput();
+ void AppendToOutput(const char* string, ...);
+
+ char* buffer_;
+ uint32_t buffer_pos_;
+ uint32_t buffer_size_;
+ bool own_buffer_;
+};
+
+
+class PrintDisassembler: public Disassembler {
+ public:
+ explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
+ ~PrintDisassembler() { }
+
+ protected:
+ virtual void ProcessOutput(Instruction* instr);
+
+ private:
+ FILE *stream_;
+};
+} // namespace vixl
+
+#endif // VIXL_A64_DISASM_A64_H
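
A typical way to drive these classes, based on the visitor API declared
above (a sketch only: Decoder comes from decoder-a64.h elsewhere in this
commit, and the exact wiring shown here is illustrative, not authoritative):

    #include "a64/decoder-a64.h"
    #include "a64/disasm-a64.h"

    void disassemble_one(vixl::Instruction* instr) {
        vixl::Decoder decoder;
        vixl::PrintDisassembler disasm(stdout);
        // The decoder classifies the instruction and calls the matching
        // Visit* hook on every registered visitor; PrintDisassembler's
        // ProcessOutput() then prints the address, raw bits and mnemonic.
        decoder.AppendVisitor(&disasm);
        decoder.Decode(instr);
    }
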
diff --git a/disas/libvixl/a64/instructions-a64.cc b/disas/libvixl/a64/instructions-a64.cc
new file mode 100644
index 0000000000..e87fa3acce
--- /dev/null
+++ b/disas/libvixl/a64/instructions-a64.cc
@@ -0,0 +1,238 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "a64/instructions-a64.h"
+#include "a64/assembler-a64.h"
+
+namespace vixl {
+
+
+static uint64_t RotateRight(uint64_t value,
+ unsigned int rotate,
+ unsigned int width) {
+ ASSERT(width <= 64);
+ rotate &= 63;
+ if (rotate == 0) {
+ return value;
+ }
+ return ((value & ((UINT64_C(1) << rotate) - 1)) << (width - rotate)) |
+ (value >> rotate);
+}
+
+
+static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
+ uint64_t value,
+ unsigned width) {
+ ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+ (width == 32));
+ ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ uint64_t result = value & ((UINT64_C(1) << width) - 1);
+ for (unsigned i = width; i < reg_size; i *= 2) {
+ result |= (result << i);
+ }
+ return result;
+}
+
+
+// Logical immediates can't encode zero, so a return value of zero is used to
+// indicate a failure case, specifically when the constraints on imm_s are
+// not met.
+uint64_t Instruction::ImmLogical() {
+ unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t n = BitN();
+ int64_t imm_s = ImmSetBits();
+ int64_t imm_r = ImmRotate();
+
+ // An integer is constructed from the n, imm_s and imm_r bits according to
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
+
+ if (n == 1) {
+ if (imm_s == 0x3F) {
+ return 0;
+ }
+ uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
+ return RotateRight(bits, imm_r, 64);
+ } else {
+ if ((imm_s >> 1) == 0x1F) {
+ return 0;
+ }
+ for (int width = 0x20; width >= 0x2; width >>= 1) {
+ if ((imm_s & width) == 0) {
+ int mask = width - 1;
+ if ((imm_s & mask) == mask) {
+ return 0;
+ }
+ uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
+ return RepeatBitsAcrossReg(reg_size,
+ RotateRight(bits, imm_r & mask, width),
+ width);
+ }
+ }
+ }
+ UNREACHABLE();
+ return 0;
+}
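+// Worked example (illustrative): n = 0, imm_s = 0b100101, imm_r = 0b000010
+// selects a 16-bit pattern with S = 5, i.e. six set bits (0x003f). Rotating
+// right by 2 within 16 bits gives 0xc00f; repeated across a W register this
+// decodes to the logical immediate 0xc00fc00f.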
+
+
+float Instruction::ImmFP32() {
+ // ImmFP: abcdefgh (8 bits)
+ // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+ // where B is b ^ 1
+ uint32_t bits = ImmFP();
+ uint32_t bit7 = (bits >> 7) & 0x1;
+ uint32_t bit6 = (bits >> 6) & 0x1;
+ uint32_t bit5_to_0 = bits & 0x3f;
+ uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+ return rawbits_to_float(result);
+}
+
+
+double Instruction::ImmFP64() {
+ // ImmFP: abcdefgh (8 bits)
+ // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+ // where B is b ^ 1
+ uint32_t bits = ImmFP();
+ uint64_t bit7 = (bits >> 7) & 0x1;
+ uint64_t bit6 = (bits >> 6) & 0x1;
+ uint64_t bit5_to_0 = bits & 0x3f;
+ uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+ return rawbits_to_double(result);
+}
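+// Worked example (illustrative): an ImmFP field of 0x70 (abcdefgh =
+// 01110000) has a = 0 and b = 1, so B = 0; it expands to the single
+// pattern 0x3f800000 (1.0f) and, via ImmFP64, to 0x3ff0000000000000 (1.0).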
+
+
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
+ switch (op) {
+ case STP_x:
+ case LDP_x:
+ case STP_d:
+ case LDP_d: return LSDoubleWord;
+ default: return LSWord;
+ }
+}
+
+
+Instruction* Instruction::ImmPCOffsetTarget() {
+ ptrdiff_t offset;
+ if (IsPCRelAddressing()) {
+ // PC-relative addressing. Only ADR is supported.
+ offset = ImmPCRel();
+ } else {
+ // All PC-relative branches.
+ ASSERT(BranchType() != UnknownBranchType);
+ // Relative branch offsets are instruction-size-aligned.
+ offset = ImmBranch() << kInstructionSizeLog2;
+ }
+ return this + offset;
+}
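+// Note that Instruction has no data members, so sizeof(Instruction) is 1 and
+// "this + offset" above advances by bytes, matching the byte-scaled offsets
+// computed for both ADR and the branch cases.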
+
+
+inline int Instruction::ImmBranch() const {
+ switch (BranchType()) {
+ case CondBranchType: return ImmCondBranch();
+ case UncondBranchType: return ImmUncondBranch();
+ case CompareBranchType: return ImmCmpBranch();
+ case TestBranchType: return ImmTestBranch();
+ default: UNREACHABLE();
+ }
+ return 0;
+}
+
+
+void Instruction::SetImmPCOffsetTarget(Instruction* target) {
+ if (IsPCRelAddressing()) {
+ SetPCRelImmTarget(target);
+ } else {
+ SetBranchImmTarget(target);
+ }
+}
+
+
+void Instruction::SetPCRelImmTarget(Instruction* target) {
+ // ADRP is not supported, so 'this' must point to an ADR instruction.
+ ASSERT(Mask(PCRelAddressingMask) == ADR);
+
+ Instr imm = Assembler::ImmPCRelAddress(target - this);
+
+ SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+}
+
+
+void Instruction::SetBranchImmTarget(Instruction* target) {
+ ASSERT(((target - this) & 3) == 0);
+ Instr branch_imm = 0;
+ uint32_t imm_mask = 0;
+ int offset = (target - this) >> kInstructionSizeLog2;
+ switch (BranchType()) {
+ case CondBranchType: {
+ branch_imm = Assembler::ImmCondBranch(offset);
+ imm_mask = ImmCondBranch_mask;
+ break;
+ }
+ case UncondBranchType: {
+ branch_imm = Assembler::ImmUncondBranch(offset);
+ imm_mask = ImmUncondBranch_mask;
+ break;
+ }
+ case CompareBranchType: {
+ branch_imm = Assembler::ImmCmpBranch(offset);
+ imm_mask = ImmCmpBranch_mask;
+ break;
+ }
+ case TestBranchType: {
+ branch_imm = Assembler::ImmTestBranch(offset);
+ imm_mask = ImmTestBranch_mask;
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ SetInstructionBits(Mask(~imm_mask) | branch_imm);
+}
+
+
+void Instruction::SetImmLLiteral(Instruction* source) {
+ ASSERT(((source - this) & 3) == 0);
+ int offset = (source - this) >> kLiteralEntrySizeLog2;
+ Instr imm = Assembler::ImmLLiteral(offset);
+ Instr mask = ImmLLiteral_mask;
+
+ SetInstructionBits(Mask(~mask) | imm);
+}
+} // namespace vixl
+
diff --git a/disas/libvixl/a64/instructions-a64.h b/disas/libvixl/a64/instructions-a64.h
new file mode 100644
index 0000000000..ba9068ca8b
--- /dev/null
+++ b/disas/libvixl/a64/instructions-a64.h
@@ -0,0 +1,344 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_INSTRUCTIONS_A64_H_
+#define VIXL_A64_INSTRUCTIONS_A64_H_
+
+#include "globals.h"
+#include "utils.h"
+#include "a64/constants-a64.h"
+
+namespace vixl {
+// ISA constants. --------------------------------------------------------------
+
+typedef uint32_t Instr;
+const unsigned kInstructionSize = 4;
+const unsigned kInstructionSizeLog2 = 2;
+const unsigned kLiteralEntrySize = 4;
+const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kMaxLoadLiteralRange = 1 * MBytes;
+
+const unsigned kWRegSize = 32;
+const unsigned kWRegSizeLog2 = 5;
+const unsigned kWRegSizeInBytes = kWRegSize / 8;
+const unsigned kXRegSize = 64;
+const unsigned kXRegSizeLog2 = 6;
+const unsigned kXRegSizeInBytes = kXRegSize / 8;
+const unsigned kSRegSize = 32;
+const unsigned kSRegSizeLog2 = 5;
+const unsigned kSRegSizeInBytes = kSRegSize / 8;
+const unsigned kDRegSize = 64;
+const unsigned kDRegSizeLog2 = 6;
+const unsigned kDRegSizeInBytes = kDRegSize / 8;
+const int64_t kWRegMask = 0x00000000ffffffffLL;
+const int64_t kXRegMask = 0xffffffffffffffffLL;
+const int64_t kSRegMask = 0x00000000ffffffffLL;
+const int64_t kDRegMask = 0xffffffffffffffffLL;
+const int64_t kXSignMask = 0x1LL << 63;
+const int64_t kWSignMask = 0x1LL << 31;
+const int64_t kByteMask = 0xffL;
+const int64_t kHalfWordMask = 0xffffL;
+const int64_t kWordMask = 0xffffffffLL;
+const uint64_t kXMaxUInt = 0xffffffffffffffffULL;
+const uint64_t kWMaxUInt = 0xffffffffULL;
+const int64_t kXMaxInt = 0x7fffffffffffffffLL;
+const int64_t kXMinInt = 0x8000000000000000LL;
+const int32_t kWMaxInt = 0x7fffffff;
+const int32_t kWMinInt = 0x80000000;
+const unsigned kLinkRegCode = 30;
+const unsigned kZeroRegCode = 31;
+const unsigned kSPRegInternalCode = 63;
+const unsigned kRegCodeMask = 0x1f;
+
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+
+const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
+const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
+const double kFP64PositiveInfinity = rawbits_to_double(0x7ff0000000000000ULL);
+const double kFP64NegativeInfinity = rawbits_to_double(0xfff0000000000000ULL);
+
+// This value is a signalling NaN as both a double and as a float (taking the
+// least-significant word).
+static const double kFP64SignallingNaN = rawbits_to_double(0x7ff000007f800001ULL);
+static const float kFP32SignallingNaN = rawbits_to_float(0x7f800001);
+
+// A similar value, but as a quiet NaN.
+static const double kFP64QuietNaN = rawbits_to_double(0x7ff800007fc00001ULL);
+static const float kFP32QuietNaN = rawbits_to_float(0x7fc00001);
+
+enum LSDataSize {
+ LSByte = 0,
+ LSHalfword = 1,
+ LSWord = 2,
+ LSDoubleWord = 3
+};
+
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
+
+enum ImmBranchType {
+ UnknownBranchType = 0,
+ CondBranchType = 1,
+ UncondBranchType = 2,
+ CompareBranchType = 3,
+ TestBranchType = 4
+};
+
+enum AddrMode {
+ Offset,
+ PreIndex,
+ PostIndex
+};
+
+enum FPRounding {
+ // The first four values are encodable directly by FPCR<RMode>.
+ FPTieEven = 0x0,
+ FPPositiveInfinity = 0x1,
+ FPNegativeInfinity = 0x2,
+ FPZero = 0x3,
+
+ // The final rounding mode is only available when explicitly specified by the
+ // instruction (such as with fcvta). It cannot be set in FPCR.
+ FPTieAway
+};
+
+enum Reg31Mode {
+ Reg31IsStackPointer,
+ Reg31IsZeroRegister
+};
+
+// Instructions. ---------------------------------------------------------------
+
+class Instruction {
+ public:
+ inline Instr InstructionBits() const {
+ return *(reinterpret_cast<const Instr*>(this));
+ }
+
+ inline void SetInstructionBits(Instr new_instr) {
+ *(reinterpret_cast<Instr*>(this)) = new_instr;
+ }
+
+ inline int Bit(int pos) const {
+ return (InstructionBits() >> pos) & 1;
+ }
+
+ inline uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, InstructionBits());
+ }
+
+ inline int32_t SignedBits(int msb, int lsb) const {
+ int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+ return signed_bitextract_32(msb, lsb, bits);
+ }
+
+ inline Instr Mask(uint32_t mask) const {
+ return InstructionBits() & mask;
+ }
+
+ #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ inline int64_t Name() const { return Func(HighBit, LowBit); }
+ INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
+ #undef DEFINE_GETTER
+
+ // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+ // formed from ImmPCRelLo and ImmPCRelHi.
+ int ImmPCRel() const {
+ int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int const width = ImmPCRelLo_width + ImmPCRelHi_width;
+ return signed_bitextract_32(width-1, 0, offset);
+ }
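+ // For example (illustrative): ImmPCRelHi = 1 with ImmPCRelLo = 3 gives
+ // (1 << ImmPCRelLo_width) | 3 = 7, sign-extended over the combined
+ // 21-bit field.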
+
+ uint64_t ImmLogical();
+ float ImmFP32();
+ double ImmFP64();
+
+ inline LSDataSize SizeLSPair() const {
+ return CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
+ }
+
+ // Helpers.
+ inline bool IsCondBranchImm() const {
+ return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
+ }
+
+ inline bool IsUncondBranchImm() const {
+ return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
+ }
+
+ inline bool IsCompareBranch() const {
+ return Mask(CompareBranchFMask) == CompareBranchFixed;
+ }
+
+ inline bool IsTestBranch() const {
+ return Mask(TestBranchFMask) == TestBranchFixed;
+ }
+
+ inline bool IsPCRelAddressing() const {
+ return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
+ }
+
+ inline bool IsLogicalImmediate() const {
+ return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
+ }
+
+ inline bool IsAddSubImmediate() const {
+ return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
+ }
+
+ inline bool IsAddSubExtended() const {
+ return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
+ }
+
+ inline bool IsLoadOrStore() const {
+ return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
+ }
+
+ inline bool IsMovn() const {
+ return (Mask(MoveWideImmediateMask) == MOVN_x) ||
+ (Mask(MoveWideImmediateMask) == MOVN_w);
+ }
+
+ // Indicate whether Rd can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rd field.
+ inline Reg31Mode RdMode() const {
+ // The following instructions use sp or wsp as Rd:
+ // Add/sub (immediate) when not setting the flags.
+ // Add/sub (extended) when not setting the flags.
+ // Logical (immediate) when not setting the flags.
+ // Otherwise, r31 is the zero register.
+ if (IsAddSubImmediate() || IsAddSubExtended()) {
+ if (Mask(AddSubSetFlagsBit)) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ if (IsLogicalImmediate()) {
+ // Of the logical (immediate) instructions, only ANDS (and its aliases)
+ // can set the flags. The others can all write into sp.
+ // Note that some logical operations are not available to
+ // immediate-operand instructions, so we have to combine two masks here.
+ if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ return Reg31IsZeroRegister;
+ }
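+ // For example (illustrative): "add sp, sp, #16" encodes Rd = 31 without
+ // setting flags, so r31 is the stack pointer, while "adds xzr, x0, #16"
+ // (the cmn alias) sets flags, so r31 is the zero register.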
+
+ // Indicate whether Rn can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rn field.
+ inline Reg31Mode RnMode() const {
+ // The following instructions use sp or wsp as Rn:
+ // All loads and stores.
+ // Add/sub (immediate).
+ // Add/sub (extended).
+ // Otherwise, r31 is the zero register.
+ if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
+ return Reg31IsStackPointer;
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ inline ImmBranchType BranchType() const {
+ if (IsCondBranchImm()) {
+ return CondBranchType;
+ } else if (IsUncondBranchImm()) {
+ return UncondBranchType;
+ } else if (IsCompareBranch()) {
+ return CompareBranchType;
+ } else if (IsTestBranch()) {
+ return TestBranchType;
+ } else {
+ return UnknownBranchType;
+ }
+ }
+
+ // Find the target of this instruction. 'this' may be a branch or a
+ // PC-relative addressing instruction.
+ Instruction* ImmPCOffsetTarget();
+
+ // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ void SetImmPCOffsetTarget(Instruction* target);
+ // Patch a literal load instruction to load from 'source'.
+ void SetImmLLiteral(Instruction* source);
+
+ inline uint8_t* LiteralAddress() {
+ int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+ return reinterpret_cast<uint8_t*>(this) + offset;
+ }
+
+ inline uint32_t Literal32() {
+ uint32_t literal;
+ memcpy(&literal, LiteralAddress(), sizeof(literal));
+
+ return literal;
+ }
+
+ inline uint64_t Literal64() {
+ uint64_t literal;
+ memcpy(&literal, LiteralAddress(), sizeof(literal));
+
+ return literal;
+ }
+
+ inline float LiteralFP32() {
+ return rawbits_to_float(Literal32());
+ }
+
+ inline double LiteralFP64() {
+ return rawbits_to_double(Literal64());
+ }
+
+ inline Instruction* NextInstruction() {
+ return this + kInstructionSize;
+ }
+
+ inline Instruction* InstructionAtOffset(int64_t offset) {
+ ASSERT(IsWordAligned(this + offset));
+ return this + offset;
+ }
+
+ template<typename T> static inline Instruction* Cast(T src) {
+ return reinterpret_cast<Instruction*>(src);
+ }
+
+ private:
+ inline int ImmBranch() const;
+
+ void SetPCRelImmTarget(Instruction* target);
+ void SetBranchImmTarget(Instruction* target);
+};
+} // namespace vixl
+
+#endif // VIXL_A64_INSTRUCTIONS_A64_H_
diff --git a/disas/libvixl/globals.h b/disas/libvixl/globals.h
new file mode 100644
index 0000000000..a6a3fccd8a
--- /dev/null
+++ b/disas/libvixl/globals.h
@@ -0,0 +1,65 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_GLOBALS_H
+#define VIXL_GLOBALS_H
+
+// Get the standard printf format macros for C99 stdint types.
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include "platform.h"
+
+
+typedef uint8_t byte;
+
+const int KBytes = 1024;
+const int MBytes = 1024 * KBytes;
+
+ #define ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort()
+#ifdef DEBUG
+ #define ASSERT(condition) assert(condition)
+ #define CHECK(condition) ASSERT(condition)
+ #define UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); ABORT()
+ #define UNREACHABLE() printf("UNREACHABLE\t"); ABORT()
+#else
+ #define ASSERT(condition) ((void) 0)
+ #define CHECK(condition) assert(condition)
+ #define UNIMPLEMENTED() ((void) 0)
+ #define UNREACHABLE() ((void) 0)
+#endif
+
+template <typename T> inline void USE(T) {}
+
+#define ALIGNMENT_EXCEPTION() printf("ALIGNMENT EXCEPTION\t"); ABORT()
+
+#endif // VIXL_GLOBALS_H
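
One caveat about the macros above: in DEBUG builds, ABORT(), UNIMPLEMENTED()
and UNREACHABLE() each expand to two statements, so "if (broken)
UNIMPLEMENTED();" guards only the first printf and always aborts. A
statement-safe variant would wrap the expansion in do/while (a sketch, not
part of this patch):

    #include <stdio.h>
    #include <stdlib.h>

    // do { ... } while (0) makes the macro behave as a single statement,
    // so it is safe in unbraced if/else bodies.
    #define SAFE_ABORT() \
        do { \
            printf("in %s, line %i\n", __FILE__, __LINE__); \
            abort(); \
        } while (0)
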
diff --git a/disas/libvixl/platform.h b/disas/libvixl/platform.h
new file mode 100644
index 0000000000..a2600f370d
--- /dev/null
+++ b/disas/libvixl/platform.h
@@ -0,0 +1,43 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef PLATFORM_H
+#define PLATFORM_H
+
+// Define platform specific functionalities.
+
+namespace vixl {
+#ifdef USE_SIMULATOR
+// Currently we assume running the simulator implies running on x86 hardware.
+inline void HostBreakpoint() { asm("int3"); }
+#else
+inline void HostBreakpoint() {
+ // TODO: Implement HostBreakpoint on a64.
+}
+#endif
+} // namespace vixl
+
+#endif
diff --git a/disas/libvixl/utils.cc b/disas/libvixl/utils.cc
new file mode 100644
index 0000000000..6f85e61835
--- /dev/null
+++ b/disas/libvixl/utils.cc
@@ -0,0 +1,120 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "utils.h"
+#include <stdio.h>
+
+namespace vixl {
+
+uint32_t float_to_rawbits(float value) {
+ uint32_t bits = 0;
+ memcpy(&bits, &value, 4);
+ return bits;
+}
+
+
+uint64_t double_to_rawbits(double value) {
+ uint64_t bits = 0;
+ memcpy(&bits, &value, 8);
+ return bits;
+}
+
+
+float rawbits_to_float(uint32_t bits) {
+ float value = 0.0;
+ memcpy(&value, &bits, 4);
+ return value;
+}
+
+
+double rawbits_to_double(uint64_t bits) {
+ double value = 0.0;
+ memcpy(&value, &bits, 8);
+ return value;
+}
+
+
+int CountLeadingZeros(uint64_t value, int width) {
+ ASSERT((width == 32) || (width == 64));
+ int count = 0;
+ uint64_t bit_test = UINT64_C(1) << (width - 1);
+ while ((count < width) && ((bit_test & value) == 0)) {
+ count++;
+ bit_test >>= 1;
+ }
+ return count;
+}
+
+
+int CountLeadingSignBits(int64_t value, int width) {
+ ASSERT((width == 32) || (width == 64));
+ if (value >= 0) {
+ return CountLeadingZeros(value, width) - 1;
+ } else {
+ return CountLeadingZeros(~value, width) - 1;
+ }
+}
+
+
+int CountTrailingZeros(uint64_t value, int width) {
+ ASSERT((width == 32) || (width == 64));
+ int count = 0;
+ while ((count < width) && (((value >> count) & 1) == 0)) {
+ count++;
+ }
+ return count;
+}
+
+
+int CountSetBits(uint64_t value, int width) {
+ // TODO: Other widths could be added here, as the implementation already
+ // supports them.
+ ASSERT((width == 32) || (width == 64));
+
+ // Mask out unused bits to ensure that they are not counted.
+ value &= (UINT64_C(0xffffffffffffffff) >> (64 - width));
+
+ // Add up the set bits.
+ // The algorithm works by adding pairs of bit fields together iteratively,
+ // where the size of each bit field doubles each time.
+ // An example for an 8-bit value:
+ // Bits: h g f e d c b a
+ // \ | \ | \ | \ |
+ // value = h+g f+e d+c b+a
+ // \ | \ |
+ // value = h+g+f+e d+c+b+a
+ // \ |
+ // value = h+g+f+e+d+c+b+a
+ value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
+ value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
+ value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
+ value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
+ value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
+ value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
+
+ return value;
+}
+} // namespace vixl
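
A quick sanity check of the pairwise-sum scheme above (illustrative, not
part of the library):

    #include <assert.h>
    #include <stdint.h>
    #include "utils.h"

    // Eight set bits are counted regardless of position, and the width
    // mask keeps bits above the register size out of the count.
    static void test_count_set_bits(void) {
        assert(vixl::CountSetBits(UINT64_C(0xf0f0), 32) == 8);
        assert(vixl::CountSetBits(~UINT64_C(0), 64) == 64);
        assert(vixl::CountSetBits(~UINT64_C(0), 32) == 32);
    }
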
diff --git a/disas/libvixl/utils.h b/disas/libvixl/utils.h
new file mode 100644
index 0000000000..029341eb14
--- /dev/null
+++ b/disas/libvixl/utils.h
@@ -0,0 +1,126 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_UTILS_H
+#define VIXL_UTILS_H
+
+
+#include <string.h>
+#include "globals.h"
+
+namespace vixl {
+
+// Check number width.
+inline bool is_intn(unsigned n, int64_t x) {
+ ASSERT((0 < n) && (n < 64));
+ int64_t limit = 1ULL << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline bool is_uintn(unsigned n, int64_t x) {
+ ASSERT((0 < n) && (n < 64));
+ return !(x >> n);
+}
+
+inline unsigned truncate_to_intn(unsigned n, int64_t x) {
+ ASSERT((0 < n) && (n < 64));
+ return (x & ((1ULL << n) - 1));
+}
+
+#define INT_1_TO_63_LIST(V) \
+V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
+V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
+V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
+V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
+V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
+V(57) V(58) V(59) V(60) V(61) V(62) V(63)
+
+#define DECLARE_IS_INT_N(N) \
+inline bool is_int##N(int64_t x) { return is_intn(N, x); }
+#define DECLARE_IS_UINT_N(N) \
+inline bool is_uint##N(int64_t x) { return is_uintn(N, x); }
+#define DECLARE_TRUNCATE_TO_INT_N(N) \
+inline int truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef DECLARE_TRUNCATE_TO_INT_N
+
+// Bit field extraction.
+inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
+ return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
+}
+
+inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
+ return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
+}
+
+inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
+ return (x << (31 - msb)) >> (lsb + 31 - msb);
+}
+
+inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) {
+ return (x << (63 - msb)) >> (lsb + 63 - msb);
+}
+
+// Floating-point representation.
+uint32_t float_to_rawbits(float value);
+uint64_t double_to_rawbits(double value);
+float rawbits_to_float(uint32_t bits);
+double rawbits_to_double(uint64_t bits);
+
+// Bit counting.
+int CountLeadingZeros(uint64_t value, int width);
+int CountLeadingSignBits(int64_t value, int width);
+int CountTrailingZeros(uint64_t value, int width);
+int CountSetBits(uint64_t value, int width);
+
+// Pointer alignment
+// TODO: rename/refactor to make it specific to instructions.
+template<typename T>
+bool IsWordAligned(T pointer) {
+ ASSERT(sizeof(pointer) == sizeof(intptr_t)); // NOLINT(runtime/sizeof)
+ return (reinterpret_cast<intptr_t>(pointer) & 3) == 0;
+}
+
+// Increment a pointer until it has the specified alignment.
+template<class T>
+T AlignUp(T pointer, size_t alignment) {
+ ASSERT(sizeof(pointer) == sizeof(uintptr_t));
+ uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
+ size_t align_step = (alignment - pointer_raw) % alignment;
+ ASSERT((pointer_raw + align_step) % alignment == 0);
+ return reinterpret_cast<T>(pointer_raw + align_step);
+}
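+// For example (illustrative): with pointer_raw = 0x1003 and alignment = 8,
+// align_step = (8 - 0x1003) % 8 = 5 (unsigned arithmetic), so AlignUp
+// returns 0x1008.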
+
+
+} // namespace vixl
+
+#endif // VIXL_UTILS_H
diff --git a/exec.c b/exec.c
index 9ad0a4b045..b69fd295f9 100644
--- a/exec.c
+++ b/exec.c
@@ -138,6 +138,7 @@ typedef struct subpage_t {
static void io_mem_init(void);
static void memory_map_init(void);
+static void tcg_commit(MemoryListener *listener);
static MemoryRegion io_mem_watch;
#endif
@@ -325,7 +326,7 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
hwaddr *plen, bool resolve_subpage)
{
MemoryRegionSection *section;
- Int128 diff, diff_page;
+ Int128 diff;
section = address_space_lookup_region(d, addr, resolve_subpage);
/* Compute offset within MemoryRegionSection */
@@ -334,13 +335,23 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
/* Compute offset within MemoryRegion */
*xlat = addr + section->offset_within_region;
- diff_page = int128_make64(((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr);
diff = int128_sub(section->mr->size, int128_make64(addr));
- diff = int128_min(diff, diff_page);
*plen = int128_get64(int128_min(diff, int128_make64(*plen)));
return section;
}
+static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
+{
+ if (memory_region_is_ram(mr)) {
+ return !(is_write && mr->readonly);
+ }
+ if (memory_region_is_romd(mr)) {
+ return !is_write;
+ }
+
+ return false;
+}
+
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
bool is_write)
@@ -351,7 +362,7 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
hwaddr len = *plen;
for (;;) {
- section = address_space_translate_internal(as->dispatch, addr, &addr, &len, true);
+ section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
mr = section->mr;
if (!mr->iommu_ops) {
@@ -370,6 +381,11 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
as = iotlb.target_as;
}
+ if (memory_access_is_direct(mr, is_write)) {
+ hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
+ len = MIN(page, len);
+ }
+
*plen = len;
*xlat = addr;
return mr;
@@ -438,6 +454,22 @@ CPUState *qemu_get_cpu(int index)
return NULL;
}
+#if !defined(CONFIG_USER_ONLY)
+void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
+{
+ /* We only support one address space per cpu at the moment. */
+ assert(cpu->as == as);
+
+ if (cpu->tcg_as_listener) {
+ memory_listener_unregister(cpu->tcg_as_listener);
+ } else {
+ cpu->tcg_as_listener = g_new0(MemoryListener, 1);
+ }
+ cpu->tcg_as_listener->commit = tcg_commit;
+ memory_listener_register(cpu->tcg_as_listener, as);
+}
+#endif
+
void cpu_exec_init(CPUArchState *env)
{
CPUState *cpu = ENV_GET_CPU(env);
@@ -457,6 +489,7 @@ void cpu_exec_init(CPUArchState *env)
QTAILQ_INIT(&env->breakpoints);
QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
+ cpu->as = &address_space_memory;
cpu->thread_id = qemu_get_thread_id();
#endif
QTAILQ_INSERT_TAIL(&cpus, cpu, node);
@@ -488,7 +521,8 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
if (phys != -1) {
- tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
+ tb_invalidate_phys_addr(cpu->as,
+ phys | (pc & ~TARGET_PAGE_MASK));
}
}
#endif
@@ -780,7 +814,7 @@ hwaddr memory_region_section_get_iotlb(CPUArchState *env,
iotlb |= PHYS_SECTION_ROM;
}
} else {
- iotlb = section - address_space_memory.dispatch->map.sections;
+ iotlb = section - section->address_space->dispatch->map.sections;
iotlb += xlat;
}
@@ -876,6 +910,7 @@ static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *secti
if (!(existing->mr->subpage)) {
subpage = subpage_init(d->as, base);
+ subsection.address_space = d->as;
subsection.mr = &subpage->iomem;
phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
phys_section_add(&d->map, &subsection));
@@ -1577,9 +1612,9 @@ static uint64_t watch_mem_read(void *opaque, hwaddr addr,
{
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
switch (size) {
- case 1: return ldub_phys(addr);
- case 2: return lduw_phys(addr);
- case 4: return ldl_phys(addr);
+ case 1: return ldub_phys(&address_space_memory, addr);
+ case 2: return lduw_phys(&address_space_memory, addr);
+ case 4: return ldl_phys(&address_space_memory, addr);
default: abort();
}
}
@@ -1590,13 +1625,13 @@ static void watch_mem_write(void *opaque, hwaddr addr,
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
switch (size) {
case 1:
- stb_phys(addr, val);
+ stb_phys(&address_space_memory, addr, val);
break;
case 2:
- stw_phys(addr, val);
+ stw_phys(&address_space_memory, addr, val);
break;
case 4:
- stl_phys(addr, val);
+ stl_phys(&address_space_memory, addr, val);
break;
default: abort();
}
@@ -1721,6 +1756,7 @@ static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
{
MemoryRegionSection section = {
+ .address_space = &address_space_memory,
.mr = mr,
.offset_within_address_space = 0,
.offset_within_region = 0,
@@ -1730,10 +1766,9 @@ static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
return phys_section_add(map, &section);
}
-MemoryRegion *iotlb_to_region(hwaddr index)
+MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
{
- return address_space_memory.dispatch->map.sections[
- index & ~TARGET_PAGE_MASK].mr;
+ return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
@@ -1793,6 +1828,11 @@ static void tcg_commit(MemoryListener *listener)
CPU_FOREACH(cpu) {
CPUArchState *env = cpu->env_ptr;
+ /* FIXME: Disentangle the cpu.h circular files deps so we can
+ directly get the right CPU from listener. */
+ if (cpu->tcg_as_listener != listener) {
+ continue;
+ }
tlb_flush(env, 1);
}
}
@@ -1813,10 +1853,6 @@ static MemoryListener core_memory_listener = {
.priority = 1,
};
-static MemoryListener tcg_memory_listener = {
- .commit = tcg_commit,
-};
-
void address_space_init_dispatch(AddressSpace *as)
{
as->dispatch = NULL;
@@ -1852,9 +1888,6 @@ static void memory_map_init(void)
address_space_init(&address_space_io, system_io, "I/O");
memory_listener_register(&core_memory_listener, &address_space_memory);
- if (tcg_enabled()) {
- memory_listener_register(&tcg_memory_listener, &address_space_memory);
- }
}
MemoryRegion *get_system_memory(void)
@@ -1925,18 +1958,6 @@ static void invalidate_and_set_dirty(hwaddr addr,
xen_modified_memory(addr, length);
}
-static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
-{
- if (memory_region_is_ram(mr)) {
- return !(is_write && mr->readonly);
- }
- if (memory_region_is_romd(mr)) {
- return !is_write;
- }
-
- return false;
-}
-
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
unsigned access_size_max = mr->ops->valid.max_access_size;
@@ -2081,7 +2102,7 @@ enum write_rom_type {
FLUSH_CACHE,
};
-static inline void cpu_physical_memory_write_rom_internal(
+static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
hwaddr l;
@@ -2091,8 +2112,7 @@ static inline void cpu_physical_memory_write_rom_internal(
while (len > 0) {
l = len;
- mr = address_space_translate(&address_space_memory,
- addr, &addr1, &l, true);
+ mr = address_space_translate(as, addr, &addr1, &l, true);
if (!(memory_region_is_ram(mr) ||
memory_region_is_romd(mr))) {
@@ -2118,10 +2138,10 @@ static inline void cpu_physical_memory_write_rom_internal(
}
/* used for ROM loading : can write in RAM and ROM */
-void cpu_physical_memory_write_rom(hwaddr addr,
+void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
const uint8_t *buf, int len)
{
- cpu_physical_memory_write_rom_internal(addr, buf, len, WRITE_DATA);
+ cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
@@ -2136,7 +2156,8 @@ void cpu_flush_icache_range(hwaddr start, int len)
return;
}
- cpu_physical_memory_write_rom_internal(start, NULL, len, FLUSH_CACHE);
+ cpu_physical_memory_write_rom_internal(&address_space_memory,
+ start, NULL, len, FLUSH_CACHE);
}
typedef struct {
@@ -2327,7 +2348,7 @@ void cpu_physical_memory_unmap(void *buffer, hwaddr len,
}
/* warning: addr must be aligned */
-static inline uint32_t ldl_phys_internal(hwaddr addr,
+static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
enum device_endian endian)
{
uint8_t *ptr;
@@ -2336,8 +2357,7 @@ static inline uint32_t ldl_phys_internal(hwaddr addr,
hwaddr l = 4;
hwaddr addr1;
- mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
- false);
+ mr = address_space_translate(as, addr, &addr1, &l, false);
if (l < 4 || !memory_access_is_direct(mr, false)) {
/* I/O case */
io_mem_read(mr, addr1, &val, 4);
@@ -2370,23 +2390,23 @@ static inline uint32_t ldl_phys_internal(hwaddr addr,
return val;
}
-uint32_t ldl_phys(hwaddr addr)
+uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
- return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
+ return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}
-uint32_t ldl_le_phys(hwaddr addr)
+uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
- return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
+ return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}
-uint32_t ldl_be_phys(hwaddr addr)
+uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
- return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
+ return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
-static inline uint64_t ldq_phys_internal(hwaddr addr,
+static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
enum device_endian endian)
{
uint8_t *ptr;
@@ -2395,7 +2415,7 @@ static inline uint64_t ldq_phys_internal(hwaddr addr,
hwaddr l = 8;
hwaddr addr1;
- mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
+ mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 8 || !memory_access_is_direct(mr, false)) {
/* I/O case */
@@ -2429,31 +2449,31 @@ static inline uint64_t ldq_phys_internal(hwaddr addr,
return val;
}
-uint64_t ldq_phys(hwaddr addr)
+uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
- return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
+ return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}
-uint64_t ldq_le_phys(hwaddr addr)
+uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
- return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
+ return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}
-uint64_t ldq_be_phys(hwaddr addr)
+uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
- return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
+ return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
-uint32_t ldub_phys(hwaddr addr)
+uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
uint8_t val;
- cpu_physical_memory_read(addr, &val, 1);
+ address_space_rw(as, addr, &val, 1, 0);
return val;
}
/* warning: addr must be aligned */
-static inline uint32_t lduw_phys_internal(hwaddr addr,
+static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
enum device_endian endian)
{
uint8_t *ptr;
@@ -2462,7 +2482,7 @@ static inline uint32_t lduw_phys_internal(hwaddr addr,
hwaddr l = 2;
hwaddr addr1;
- mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
+ mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 2 || !memory_access_is_direct(mr, false)) {
/* I/O case */
@@ -2496,32 +2516,32 @@ static inline uint32_t lduw_phys_internal(hwaddr addr,
return val;
}
-uint32_t lduw_phys(hwaddr addr)
+uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
- return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
+ return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}
-uint32_t lduw_le_phys(hwaddr addr)
+uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
- return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
+ return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}
-uint32_t lduw_be_phys(hwaddr addr)
+uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
- return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
+ return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
and the code inside is not invalidated. It is useful if the dirty
bits are used to track modified PTEs */
-void stl_phys_notdirty(hwaddr addr, uint32_t val)
+void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
uint8_t *ptr;
MemoryRegion *mr;
hwaddr l = 4;
hwaddr addr1;
- mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
+ mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
io_mem_write(mr, addr1, val, 4);
@@ -2544,7 +2564,8 @@ void stl_phys_notdirty(hwaddr addr, uint32_t val)
}
/* warning: addr must be aligned */
-static inline void stl_phys_internal(hwaddr addr, uint32_t val,
+static inline void stl_phys_internal(AddressSpace *as,
+ hwaddr addr, uint32_t val,
enum device_endian endian)
{
uint8_t *ptr;
@@ -2552,7 +2573,7 @@ static inline void stl_phys_internal(hwaddr addr, uint32_t val,
hwaddr l = 4;
hwaddr addr1;
- mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
+ mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
@@ -2584,30 +2605,31 @@ static inline void stl_phys_internal(hwaddr addr, uint32_t val,
}
}
-void stl_phys(hwaddr addr, uint32_t val)
+void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
+ stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}
-void stl_le_phys(hwaddr addr, uint32_t val)
+void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
+ stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}
-void stl_be_phys(hwaddr addr, uint32_t val)
+void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
+ stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
-void stb_phys(hwaddr addr, uint32_t val)
+void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
uint8_t v = val;
- cpu_physical_memory_write(addr, &v, 1);
+ address_space_rw(as, addr, &v, 1, 1);
}
/* warning: addr must be aligned */
-static inline void stw_phys_internal(hwaddr addr, uint32_t val,
+static inline void stw_phys_internal(AddressSpace *as,
+ hwaddr addr, uint32_t val,
enum device_endian endian)
{
uint8_t *ptr;
@@ -2615,8 +2637,7 @@ static inline void stw_phys_internal(hwaddr addr, uint32_t val,
hwaddr l = 2;
hwaddr addr1;
- mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
- true);
+ mr = address_space_translate(as, addr, &addr1, &l, true);
if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
@@ -2647,38 +2668,38 @@ static inline void stw_phys_internal(hwaddr addr, uint32_t val,
}
}
-void stw_phys(hwaddr addr, uint32_t val)
+void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
+ stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}
-void stw_le_phys(hwaddr addr, uint32_t val)
+void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
+ stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}
-void stw_be_phys(hwaddr addr, uint32_t val)
+void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
+ stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
-void stq_phys(hwaddr addr, uint64_t val)
+void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
val = tswap64(val);
- cpu_physical_memory_write(addr, &val, 8);
+ address_space_rw(as, addr, (void *) &val, 8, 1);
}
-void stq_le_phys(hwaddr addr, uint64_t val)
+void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
val = cpu_to_le64(val);
- cpu_physical_memory_write(addr, &val, 8);
+ address_space_rw(as, addr, (void *) &val, 8, 1);
}
-void stq_be_phys(hwaddr addr, uint64_t val)
+void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
val = cpu_to_be64(val);
- cpu_physical_memory_write(addr, &val, 8);
+ address_space_rw(as, addr, (void *) &val, 8, 1);
}
/* virtual memory access for debug (includes writing to ROM) */
@@ -2699,10 +2720,11 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
if (l > len)
l = len;
phys_addr += (addr & ~TARGET_PAGE_MASK);
- if (is_write)
- cpu_physical_memory_write_rom(phys_addr, buf, l);
- else
- cpu_physical_memory_rw(phys_addr, buf, l, is_write);
+ if (is_write) {
+ cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
+ } else {
+ address_space_rw(cpu->as, phys_addr, buf, l, 0);
+ }
len -= l;
buf += l;
addr += l;
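
With these changes the physical-memory helpers take an explicit
AddressSpace instead of implicitly using address_space_memory. A sketch of
the new calling convention (illustrative; the header name and helper are
assumptions, not part of this patch):

    #include "exec/cpu-common.h"   /* declares ldl_phys() and friends */

    static uint32_t read_word_examples(CPUState *cpu, hwaddr addr)
    {
        /* Before this patch the call was simply ldl_phys(addr). */
        /* Now name the address space explicitly ... */
        uint32_t v_global = ldl_phys(&address_space_memory, addr);
        /* ... or, in per-CPU code, use the CPU's own view: */
        uint32_t v_cpu = ldl_phys(cpu->as, addr);
        return v_global ^ v_cpu;
    }
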
diff --git a/hw/9pfs/cofile.c b/hw/9pfs/cofile.c
index 194c1306c6..2efebf3571 100644
--- a/hw/9pfs/cofile.c
+++ b/hw/9pfs/cofile.c
@@ -38,10 +38,6 @@ int v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode,
});
v9fs_path_unlock(s);
}
- /* The ioctl may not be supported depending on the path */
- if (err == -ENOTTY) {
- err = 0;
- }
return err;
}
diff --git a/hw/9pfs/virtio-9p-handle.c b/hw/9pfs/virtio-9p-handle.c
index fe8e0ed19d..17002a3d28 100644
--- a/hw/9pfs/virtio-9p-handle.c
+++ b/hw/9pfs/virtio-9p-handle.c
@@ -582,6 +582,7 @@ static int handle_unlinkat(FsContext *ctx, V9fsPath *dir,
static int handle_ioc_getversion(FsContext *ctx, V9fsPath *path,
mode_t st_mode, uint64_t *st_gen)
{
+#ifdef FS_IOC_GETVERSION
int err;
V9fsFidOpenState fid_open;
@@ -590,7 +591,8 @@ static int handle_ioc_getversion(FsContext *ctx, V9fsPath *path,
* We can get fd for regular files and directories only
*/
if (!S_ISREG(st_mode) && !S_ISDIR(st_mode)) {
- return 0;
+ errno = ENOTTY;
+ return -1;
}
err = handle_open(ctx, path, O_RDONLY, &fid_open);
if (err < 0) {
@@ -599,6 +601,10 @@ static int handle_ioc_getversion(FsContext *ctx, V9fsPath *path,
err = ioctl(fid_open.fd, FS_IOC_GETVERSION, st_gen);
handle_close(ctx, &fid_open);
return err;
+#else
+ errno = ENOTTY;
+ return -1;
+#endif
}
static int handle_init(FsContext *ctx)
diff --git a/hw/9pfs/virtio-9p-local.c b/hw/9pfs/virtio-9p-local.c
index fc93e9e6e8..df0dbffa7a 100644
--- a/hw/9pfs/virtio-9p-local.c
+++ b/hw/9pfs/virtio-9p-local.c
@@ -1068,8 +1068,8 @@ err_out:
static int local_ioc_getversion(FsContext *ctx, V9fsPath *path,
mode_t st_mode, uint64_t *st_gen)
{
- int err;
#ifdef FS_IOC_GETVERSION
+ int err;
V9fsFidOpenState fid_open;
/*
@@ -1077,7 +1077,8 @@ static int local_ioc_getversion(FsContext *ctx, V9fsPath *path,
* We can get fd for regular files and directories only
*/
if (!S_ISREG(st_mode) && !S_ISDIR(st_mode)) {
- return 0;
+ errno = ENOTTY;
+ return -1;
}
err = local_open(ctx, path, O_RDONLY, &fid_open);
if (err < 0) {
@@ -1085,10 +1086,11 @@ static int local_ioc_getversion(FsContext *ctx, V9fsPath *path,
}
err = ioctl(fid_open.fd, FS_IOC_GETVERSION, st_gen);
local_close(ctx, &fid_open);
+ return err;
#else
- err = -ENOTTY;
+ errno = ENOTTY;
+ return -1;
#endif
- return err;
}
static int local_init(FsContext *ctx)
diff --git a/hw/9pfs/virtio-9p-proxy.c b/hw/9pfs/virtio-9p-proxy.c
index 5f44bb758b..b57966d9d8 100644
--- a/hw/9pfs/virtio-9p-proxy.c
+++ b/hw/9pfs/virtio-9p-proxy.c
@@ -1086,7 +1086,8 @@ static int proxy_ioc_getversion(FsContext *fs_ctx, V9fsPath *path,
* we can get fd for regular files and directories only
*/
if (!S_ISREG(st_mode) && !S_ISDIR(st_mode)) {
- return 0;
+ errno = ENOTTY;
+ return -1;
}
err = v9fs_request(fs_ctx->private, T_GETVERSION, st_gen, "s", path);
if (err < 0) {
diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
index 8cbb8ae32a..83e4e93983 100644
--- a/hw/9pfs/virtio-9p.c
+++ b/hw/9pfs/virtio-9p.c
@@ -1080,10 +1080,18 @@ static void v9fs_getattr(void *opaque)
/* fill st_gen if requested and supported by underlying fs */
if (request_mask & P9_STATS_GEN) {
retval = v9fs_co_st_gen(pdu, &fidp->path, stbuf.st_mode, &v9stat_dotl);
- if (retval < 0) {
+ switch (retval) {
+ case 0:
+ /* we have valid st_gen: update result mask */
+ v9stat_dotl.st_result_mask |= P9_STATS_GEN;
+ break;
+ case -EINTR:
+ /* request cancelled, e.g. by Tflush */
goto out;
+ default:
+ /* failed to get st_gen: not fatal, ignore */
+ break;
}
- v9stat_dotl.st_result_mask |= P9_STATS_GEN;
}
retval = pdu_marshal(pdu, offset, "A", &v9stat_dotl);
if (retval < 0) {
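Note: across the 9pfs backends, an unsupported getversion now reports failure with errno set to ENOTTY instead of silently returning 0, and v9fs_getattr() above treats only -EINTR as fatal. A hedged sketch of the resulting call-site convention (helper name hypothetical; assumes a get_st_gen backend hook of the shape shown above):

    /* Sketch: ENOTTY now means "no version support here", not a hard error. */
    static int fetch_st_gen(FsContext *ctx, V9fsPath *path, mode_t mode,
                            uint64_t *st_gen)
    {
        if (ctx->exops.get_st_gen(ctx, path, mode, st_gen) < 0) {
            return errno == ENOTTY ? 0 : -errno;  /* unsupported is not fatal */
        }
        return 0;
    }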
diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c
index 20795ac0fd..1351ba55bd 100644
--- a/hw/alpha/dp264.c
+++ b/hw/alpha/dp264.c
@@ -161,8 +161,9 @@ static void clipper_init(QEMUMachineInitArgs *args)
load_image_targphys(initrd_filename, initrd_base,
ram_size - initrd_base);
- stq_phys(param_offset + 0x100, initrd_base + 0xfffffc0000000000ULL);
- stq_phys(param_offset + 0x108, initrd_size);
+ stq_phys(&address_space_memory,
+ param_offset + 0x100, initrd_base + 0xfffffc0000000000ULL);
+ stq_phys(&address_space_memory, param_offset + 0x108, initrd_size);
}
}
}
diff --git a/hw/alpha/typhoon.c b/hw/alpha/typhoon.c
index 71a5a37fdc..67a1070281 100644
--- a/hw/alpha/typhoon.c
+++ b/hw/alpha/typhoon.c
@@ -613,7 +613,7 @@ static bool make_iommu_tlbe(hwaddr taddr, hwaddr mask, IOMMUTLBEntry *ret)
translation, given the address of the PTE. */
static bool pte_translate(hwaddr pte_addr, IOMMUTLBEntry *ret)
{
- uint64_t pte = ldq_phys(pte_addr);
+ uint64_t pte = ldq_phys(&address_space_memory, pte_addr);
/* Check valid bit. */
if ((pte & 1) == 0) {
diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c
index 4658e19504..01206f243c 100644
--- a/hw/arm/allwinner-a10.c
+++ b/hw/arm/allwinner-a10.c
@@ -31,6 +31,13 @@ static void aw_a10_init(Object *obj)
object_initialize(&s->timer, sizeof(s->timer), TYPE_AW_A10_PIT);
qdev_set_parent_bus(DEVICE(&s->timer), sysbus_get_default());
+
+ object_initialize(&s->emac, sizeof(s->emac), TYPE_AW_EMAC);
+ qdev_set_parent_bus(DEVICE(&s->emac), sysbus_get_default());
+ if (nd_table[0].used) {
+ qemu_check_nic_model(&nd_table[0], TYPE_AW_EMAC);
+ qdev_set_nic_properties(DEVICE(&s->emac), &nd_table[0]);
+ }
}
static void aw_a10_realize(DeviceState *dev, Error **errp)
@@ -76,6 +83,15 @@ static void aw_a10_realize(DeviceState *dev, Error **errp)
sysbus_connect_irq(sysbusdev, 4, s->irq[67]);
sysbus_connect_irq(sysbusdev, 5, s->irq[68]);
+ object_property_set_bool(OBJECT(&s->emac), true, "realized", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbusdev = SYS_BUS_DEVICE(&s->emac);
+ sysbus_mmio_map(sysbusdev, 0, AW_A10_EMAC_BASE);
+ sysbus_connect_irq(sysbusdev, 0, s->irq[55]);
+
serial_mm_init(get_system_memory(), AW_A10_UART0_REG_BASE, 2, s->irq[1],
115200, serial_hds[0], DEVICE_NATIVE_ENDIAN);
}
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 4036262f50..dc62918da2 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -16,6 +16,7 @@
#include "elf.h"
#include "sysemu/device_tree.h"
#include "qemu/config-file.h"
+#include "exec/address-spaces.h"
/* Kernel boot protocol is specified in the kernel docs
* Documentation/arm/Booting and Documentation/arm64/booting.txt
@@ -169,7 +170,7 @@ static void default_reset_secondary(ARMCPU *cpu,
{
CPUARMState *env = &cpu->env;
- stl_phys_notdirty(info->smp_bootreg_addr, 0);
+ stl_phys_notdirty(&address_space_memory, info->smp_bootreg_addr, 0);
env->regs[15] = info->smp_loader_start;
}
@@ -179,7 +180,7 @@ static inline bool have_dtb(const struct arm_boot_info *info)
}
#define WRITE_WORD(p, value) do { \
- stl_phys_notdirty(p, value); \
+ stl_phys_notdirty(&address_space_memory, p, value); \
p += 4; \
} while (0)
diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c
index 3fcb6d22f5..d95a7f35eb 100644
--- a/hw/arm/cubieboard.c
+++ b/hw/arm/cubieboard.c
@@ -36,10 +36,17 @@ static void cubieboard_init(QEMUMachineInitArgs *args)
Error *err = NULL;
s->a10 = AW_A10(object_new(TYPE_AW_A10));
+
+ object_property_set_int(OBJECT(&s->a10->emac), 1, "phy-addr", &err);
+ if (err != NULL) {
+ error_report("Couldn't set phy address: %s", error_get_pretty(err));
+ exit(1);
+ }
+
object_property_set_bool(OBJECT(s->a10), true, "realized", &err);
if (err != NULL) {
- error_report("Couldn't realize Allwinner A10: %s\n",
- error_get_pretty(err));
+ error_report("Couldn't realize Allwinner A10: %s",
+ error_get_pretty(err));
exit(1);
}
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
index d76a1d1f78..f66d57b113 100644
--- a/hw/arm/highbank.c
+++ b/hw/arm/highbank.c
@@ -69,11 +69,11 @@ static void hb_reset_secondary(ARMCPU *cpu, const struct arm_boot_info *info)
switch (info->nb_cpus) {
case 4:
- stl_phys_notdirty(SMP_BOOT_REG + 0x30, 0);
+ stl_phys_notdirty(&address_space_memory, SMP_BOOT_REG + 0x30, 0);
case 3:
- stl_phys_notdirty(SMP_BOOT_REG + 0x20, 0);
+ stl_phys_notdirty(&address_space_memory, SMP_BOOT_REG + 0x20, 0);
case 2:
- stl_phys_notdirty(SMP_BOOT_REG + 0x10, 0);
+ stl_phys_notdirty(&address_space_memory, SMP_BOOT_REG + 0x10, 0);
env->regs[15] = SMP_BOOT_ADDR;
break;
default:
diff --git a/hw/core/loader.c b/hw/core/loader.c
index 0634bee20c..e1c3f3a860 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -778,7 +778,8 @@ static void rom_reset(void *unused)
void *host = memory_region_get_ram_ptr(rom->mr);
memcpy(host, rom->data, rom->datasize);
} else {
- cpu_physical_memory_write_rom(rom->addr, rom->data, rom->datasize);
+ cpu_physical_memory_write_rom(&address_space_memory,
+ rom->addr, rom->data, rom->datasize);
}
if (rom->isrom) {
/* rom needs to be written only once */
diff --git a/hw/display/sm501.c b/hw/display/sm501.c
index c75d6ac63c..0b5f993594 100644
--- a/hw/display/sm501.c
+++ b/hw/display/sm501.c
@@ -30,6 +30,7 @@
#include "hw/sysbus.h"
#include "qemu/range.h"
#include "ui/pixel_ops.h"
+#include "exec/address-spaces.h"
/*
* Status: 2010/05/07
diff --git a/hw/display/sm501_template.h b/hw/display/sm501_template.h
index 2d4a3d8b48..d4cea9e150 100644
--- a/hw/display/sm501_template.h
+++ b/hw/display/sm501_template.h
@@ -120,7 +120,7 @@ static void glue(draw_hwc_line_, PIXEL_NAME)(SM501State * s, int crt,
/* get pixel value */
if (i % 4 == 0) {
- bitset = ldub_phys(cursor_addr);
+ bitset = ldub_phys(&address_space_memory, cursor_addr);
cursor_addr++;
}
v = bitset & 3;
diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c
index cb7bda9803..741dd20d31 100644
--- a/hw/dma/pl080.c
+++ b/hw/dma/pl080.c
@@ -8,6 +8,7 @@
*/
#include "hw/sysbus.h"
+#include "exec/address-spaces.h"
#define PL080_MAX_CHANNELS 8
#define PL080_CONF_E 0x1
@@ -204,10 +205,10 @@ again:
if (size == 0) {
/* Transfer complete. */
if (ch->lli) {
- ch->src = ldl_le_phys(ch->lli);
- ch->dest = ldl_le_phys(ch->lli + 4);
- ch->ctrl = ldl_le_phys(ch->lli + 12);
- ch->lli = ldl_le_phys(ch->lli + 8);
+ ch->src = ldl_le_phys(&address_space_memory, ch->lli);
+ ch->dest = ldl_le_phys(&address_space_memory, ch->lli + 4);
+ ch->ctrl = ldl_le_phys(&address_space_memory, ch->lli + 12);
+ ch->lli = ldl_le_phys(&address_space_memory, ch->lli + 8);
} else {
ch->conf &= ~PL080_CCONF_E;
}
diff --git a/hw/dma/sun4m_iommu.c b/hw/dma/sun4m_iommu.c
index a04409a273..723f66d8f2 100644
--- a/hw/dma/sun4m_iommu.c
+++ b/hw/dma/sun4m_iommu.c
@@ -24,6 +24,7 @@
#include "hw/sparc/sun4m.h"
#include "hw/sysbus.h"
+#include "exec/address-spaces.h"
#include "trace.h"
/*
@@ -262,7 +263,7 @@ static uint32_t iommu_page_get_flags(IOMMUState *s, hwaddr addr)
iopte = s->regs[IOMMU_BASE] << 4;
addr &= ~s->iostart;
iopte += (addr >> (IOMMU_PAGE_SHIFT - 2)) & ~3;
- ret = ldl_be_phys(iopte);
+ ret = ldl_be_phys(&address_space_memory, iopte);
trace_sun4m_iommu_page_get_flags(pa, iopte, ret);
return ret;
}
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index 3d3deb6298..361ae90b65 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -129,7 +129,8 @@ static void apic_sync_vapic(APICCommonState *s, int sync_type)
}
vapic_state.irr = vector & 0xff;
- cpu_physical_memory_write_rom(s->vapic_paddr + start,
+ cpu_physical_memory_write_rom(&address_space_memory,
+ s->vapic_paddr + start,
((void *)&vapic_state) + start, length);
}
}
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 1c4a1143af..93eaa6b2fa 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -66,7 +66,7 @@ void gic_update(GICState *s)
best_prio = 0x100;
best_irq = 1023;
for (irq = 0; irq < s->num_irq; irq++) {
- if (GIC_TEST_ENABLED(irq, cm) && GIC_TEST_PENDING(irq, cm)) {
+ if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm)) {
if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
best_prio = GIC_GET_PRIORITY(irq, cpu);
best_irq = irq;
@@ -89,14 +89,43 @@ void gic_set_pending_private(GICState *s, int cpu, int irq)
{
int cm = 1 << cpu;
- if (GIC_TEST_PENDING(irq, cm))
+ if (gic_test_pending(s, irq, cm)) {
return;
+ }
DPRINTF("Set %d pending cpu %d\n", irq, cpu);
GIC_SET_PENDING(irq, cm);
gic_update(s);
}
+static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
+ int cm, int target)
+{
+ if (level) {
+ GIC_SET_LEVEL(irq, cm);
+ if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
+ DPRINTF("Set %d pending mask %x\n", irq, target);
+ GIC_SET_PENDING(irq, target);
+ }
+ } else {
+ GIC_CLEAR_LEVEL(irq, cm);
+ }
+}
+
+static void gic_set_irq_generic(GICState *s, int irq, int level,
+ int cm, int target)
+{
+ if (level) {
+ GIC_SET_LEVEL(irq, cm);
+ DPRINTF("Set %d pending mask %x\n", irq, target);
+ if (GIC_TEST_EDGE_TRIGGER(irq)) {
+ GIC_SET_PENDING(irq, target);
+ }
+ } else {
+ GIC_CLEAR_LEVEL(irq, cm);
+ }
+}
+
/* Process a change in an external IRQ input. */
static void gic_set_irq(void *opaque, int irq, int level)
{
@@ -122,19 +151,18 @@ static void gic_set_irq(void *opaque, int irq, int level)
target = cm;
}
+ assert(irq >= GIC_NR_SGIS);
+
if (level == GIC_TEST_LEVEL(irq, cm)) {
return;
}
- if (level) {
- GIC_SET_LEVEL(irq, cm);
- if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
- DPRINTF("Set %d pending mask %x\n", irq, target);
- GIC_SET_PENDING(irq, target);
- }
+ if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
+ gic_set_irq_11mpcore(s, irq, level, cm, target);
} else {
- GIC_CLEAR_LEVEL(irq, cm);
+ gic_set_irq_generic(s, irq, level, cm, target);
}
+
gic_update(s);
}
@@ -151,21 +179,48 @@ static void gic_set_running_irq(GICState *s, int cpu, int irq)
uint32_t gic_acknowledge_irq(GICState *s, int cpu)
{
- int new_irq;
+ int ret, irq, src;
int cm = 1 << cpu;
- new_irq = s->current_pending[cpu];
- if (new_irq == 1023
- || GIC_GET_PRIORITY(new_irq, cpu) >= s->running_priority[cpu]) {
+ irq = s->current_pending[cpu];
+ if (irq == 1023
+ || GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
DPRINTF("ACK no pending IRQ\n");
return 1023;
}
- s->last_active[new_irq][cpu] = s->running_irq[cpu];
- /* Clear pending flags for both level and edge triggered interrupts.
- Level triggered IRQs will be reasserted once they become inactive. */
- GIC_CLEAR_PENDING(new_irq, GIC_TEST_MODEL(new_irq) ? ALL_CPU_MASK : cm);
- gic_set_running_irq(s, cpu, new_irq);
- DPRINTF("ACK %d\n", new_irq);
- return new_irq;
+ s->last_active[irq][cpu] = s->running_irq[cpu];
+
+ if (s->revision == REV_11MPCORE) {
+ /* Clear pending flags for both level and edge triggered interrupts.
+ * Level triggered IRQs will be reasserted once they become inactive.
+ */
+ GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
+ ret = irq;
+ } else {
+ if (irq < GIC_NR_SGIS) {
+ /* Look up the source CPU for the SGI and clear its bit in the
+ * sgi_pending map. Return the source and clear the overall pending
+ * state on this CPU if the SGI is not pending from any other CPU.
+ */
+ assert(s->sgi_pending[irq][cpu] != 0);
+ src = ctz32(s->sgi_pending[irq][cpu]);
+ s->sgi_pending[irq][cpu] &= ~(1 << src);
+ if (s->sgi_pending[irq][cpu] == 0) {
+ GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
+ }
+ ret = irq | ((src & 0x7) << 10);
+ } else {
+ /* Clear pending state for both level and edge triggered
+ * interrupts. (level triggered interrupts with an active line
+ * remain pending, see gic_test_pending)
+ */
+ GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
+ ret = irq;
+ }
+ }
+
+ gic_set_running_irq(s, cpu, irq);
+ DPRINTF("ACK %d\n", irq);
+ return ret;
}
void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val)
@@ -195,14 +250,18 @@ void gic_complete_irq(GICState *s, int cpu, int irq)
}
if (s->running_irq[cpu] == 1023)
return; /* No active IRQ. */
- /* Mark level triggered interrupts as pending if they are still
- raised. */
- if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
- && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
- DPRINTF("Set %d pending mask %x\n", irq, cm);
- GIC_SET_PENDING(irq, cm);
- update = 1;
+
+ if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
+ /* Mark level triggered interrupts as pending if they are still
+ raised. */
+ if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
+ && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
+ DPRINTF("Set %d pending mask %x\n", irq, cm);
+ GIC_SET_PENDING(irq, cm);
+ update = 1;
+ }
}
+
if (irq != s->running_irq[cpu]) {
/* Complete an IRQ that is not currently running. */
int tmp = s->running_irq[cpu];
@@ -273,7 +332,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset)
res = 0;
mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
for (i = 0; i < 8; i++) {
- if (GIC_TEST_PENDING(irq + i, mask)) {
+ if (gic_test_pending(s, irq + i, mask)) {
res |= (1 << i);
}
}
@@ -323,6 +382,22 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset)
if (GIC_TEST_EDGE_TRIGGER(irq + i))
res |= (2 << (i * 2));
}
+ } else if (offset < 0xf10) {
+ goto bad_reg;
+ } else if (offset < 0xf30) {
+ if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
+ goto bad_reg;
+ }
+
+ if (offset < 0xf20) {
+ /* GICD_CPENDSGIRn */
+ irq = (offset - 0xf10);
+ } else {
+ /* GICD_SPENDSGIRn */
+ irq = (offset - 0xf20);
+ }
+
+ res = s->sgi_pending[irq][cpu];
} else if (offset < 0xfe0) {
goto bad_reg;
} else /* offset >= 0xfe0 */ {
@@ -497,9 +572,31 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
GIC_CLEAR_EDGE_TRIGGER(irq + i);
}
}
- } else {
+ } else if (offset < 0xf10) {
/* 0xf00 is only handled for 32-bit writes. */
goto bad_reg;
+ } else if (offset < 0xf20) {
+ /* GICD_CPENDSGIRn */
+ if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
+ goto bad_reg;
+ }
+ irq = (offset - 0xf10);
+
+ s->sgi_pending[irq][cpu] &= ~value;
+ if (s->sgi_pending[irq][cpu] == 0) {
+ GIC_CLEAR_PENDING(irq, 1 << cpu);
+ }
+ } else if (offset < 0xf30) {
+ /* GICD_SPENDSGIRn */
+ if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
+ goto bad_reg;
+ }
+ irq = (offset - 0xf20);
+
+ GIC_SET_PENDING(irq, 1 << cpu);
+ s->sgi_pending[irq][cpu] |= value;
+ } else {
+ goto bad_reg;
}
gic_update(s);
return;
@@ -523,6 +620,7 @@ static void gic_dist_writel(void *opaque, hwaddr offset,
int cpu;
int irq;
int mask;
+ int target_cpu;
cpu = gic_get_current_cpu(s);
irq = value & 0x3ff;
@@ -542,6 +640,12 @@ static void gic_dist_writel(void *opaque, hwaddr offset,
break;
}
GIC_SET_PENDING(irq, mask);
+ target_cpu = ctz32(mask);
+ while (target_cpu < GIC_NCPU) {
+ s->sgi_pending[irq][target_cpu] |= (1 << cpu);
+ mask &= ~(1 << target_cpu);
+ target_cpu = ctz32(mask);
+ }
gic_update(s);
return;
}
@@ -565,14 +669,17 @@ static uint32_t gic_cpu_read(GICState *s, int cpu, int offset)
case 0x04: /* Priority mask */
return s->priority_mask[cpu];
case 0x08: /* Binary Point */
- /* ??? Not implemented. */
- return 0;
+ return s->bpr[cpu];
case 0x0c: /* Acknowledge */
return gic_acknowledge_irq(s, cpu);
case 0x14: /* Running Priority */
return s->running_priority[cpu];
case 0x18: /* Highest Pending Interrupt */
return s->current_pending[cpu];
+ case 0x1c: /* Aliased Binary Point */
+ return s->abpr[cpu];
+ case 0xd0: case 0xd4: case 0xd8: case 0xdc:
+ return s->apr[(offset - 0xd0) / 4][cpu];
default:
qemu_log_mask(LOG_GUEST_ERROR,
"gic_cpu_read: Bad offset %x\n", (int)offset);
@@ -591,10 +698,18 @@ static void gic_cpu_write(GICState *s, int cpu, int offset, uint32_t value)
s->priority_mask[cpu] = (value & 0xff);
break;
case 0x08: /* Binary Point */
- /* ??? Not implemented. */
+ s->bpr[cpu] = (value & 0x7);
break;
case 0x10: /* End Of Interrupt */
return gic_complete_irq(s, cpu, value & 0x3ff);
+ case 0x1c: /* Aliased Binary Point */
+ if (s->revision >= 2) {
+ s->abpr[cpu] = (value & 0x7);
+ }
+ break;
+ case 0xd0: case 0xd4: case 0xd8: case 0xdc:
+ qemu_log_mask(LOG_UNIMP, "Writing APR not implemented\n");
+ break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
"gic_cpu_write: Bad offset %x\n", (int)offset);
diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c
index e4fc65028a..6d884eca3b 100644
--- a/hw/intc/arm_gic_common.c
+++ b/hw/intc/arm_gic_common.c
@@ -58,8 +58,8 @@ static const VMStateDescription vmstate_gic_irq_state = {
static const VMStateDescription vmstate_gic = {
.name = "arm_gic",
- .version_id = 4,
- .minimum_version_id = 4,
+ .version_id = 7,
+ .minimum_version_id = 7,
.pre_save = gic_pre_save,
.post_load = gic_post_load,
.fields = (VMStateField[]) {
@@ -71,10 +71,14 @@ static const VMStateDescription vmstate_gic = {
VMSTATE_UINT8_2DARRAY(priority1, GICState, GIC_INTERNAL, GIC_NCPU),
VMSTATE_UINT8_ARRAY(priority2, GICState, GIC_MAXIRQ - GIC_INTERNAL),
VMSTATE_UINT16_2DARRAY(last_active, GICState, GIC_MAXIRQ, GIC_NCPU),
+ VMSTATE_UINT8_2DARRAY(sgi_pending, GICState, GIC_NR_SGIS, GIC_NCPU),
VMSTATE_UINT16_ARRAY(priority_mask, GICState, GIC_NCPU),
VMSTATE_UINT16_ARRAY(running_irq, GICState, GIC_NCPU),
VMSTATE_UINT16_ARRAY(running_priority, GICState, GIC_NCPU),
VMSTATE_UINT16_ARRAY(current_pending, GICState, GIC_NCPU),
+ VMSTATE_UINT8_ARRAY(bpr, GICState, GIC_NCPU),
+ VMSTATE_UINT8_ARRAY(abpr, GICState, GIC_NCPU),
+ VMSTATE_UINT32_2DARRAY(apr, GICState, GIC_NR_APRS, GIC_NCPU),
VMSTATE_END_OF_LIST()
}
};
diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h
index 8c02d5888c..92a6f7a3ff 100644
--- a/hw/intc/gic_internal.h
+++ b/hw/intc/gic_internal.h
@@ -34,7 +34,6 @@
#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm)
#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm)
-#define GIC_TEST_PENDING(irq, cm) ((s->irq_state[irq].pending & (cm)) != 0)
#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm)
#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm)
#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
@@ -63,4 +62,19 @@ void gic_update(GICState *s);
void gic_init_irqs_and_distributor(GICState *s, int num_irq);
void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val);
+static inline bool gic_test_pending(GICState *s, int irq, int cm)
+{
+ if (s->revision == REV_NVIC || s->revision == REV_11MPCORE) {
+ return s->irq_state[irq].pending & cm;
+ } else {
+ /* Edge-triggered interrupts are marked pending on a rising edge, but
+ * level-triggered interrupts are considered pending either when the
+ * level is active or when software has explicitly written to
+ * GICD_ISPENDR to set the state pending.
+ */
+ return (s->irq_state[irq].pending & cm) ||
+ (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_LEVEL(irq, cm));
+ }
+}
+
#endif /* !QEMU_ARM_GIC_INTERNAL_H */
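Note: gic_dist_writel() records the requesting CPU for every SGI target by walking the set bits of the target mask with ctz32(). A hedged sketch of that bit-walk idiom (mask value hypothetical; ctz32() from qemu/host-utils.h):

    /* Sketch: visit each set bit of a CPU mask, lowest bit first. */
    uint32_t mask = 0x16;  /* hypothetical targets: CPUs 1, 2 and 4 */
    while (mask) {
        int cpu = ctz32(mask);
        mask &= ~(1 << cpu);
        /* ... record the SGI source for this target cpu ... */
    }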
diff --git a/hw/microblaze/petalogix_ml605_mmu.c b/hw/microblaze/petalogix_ml605_mmu.c
index 1a87756246..37cbbfd592 100644
--- a/hw/microblaze/petalogix_ml605_mmu.c
+++ b/hw/microblaze/petalogix_ml605_mmu.c
@@ -75,7 +75,6 @@ static void
petalogix_ml605_init(QEMUMachineInitArgs *args)
{
ram_addr_t ram_size = args->ram_size;
- const char *cpu_model = args->cpu_model;
MemoryRegion *address_space_mem = get_system_memory();
DeviceState *dev, *dma, *eth0;
Object *ds, *cs;
@@ -89,10 +88,8 @@ petalogix_ml605_init(QEMUMachineInitArgs *args)
qemu_irq irq[32];
/* init CPUs */
- if (cpu_model == NULL) {
- cpu_model = "microblaze";
- }
- cpu = cpu_mb_init(cpu_model);
+ cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
+ object_property_set_bool(OBJECT(cpu), true, "realized", &error_abort);
/* Attach emulated BRAM through the LMB. */
memory_region_init_ram(phys_lmb_bram, NULL, "petalogix_ml605.lmb_bram",
diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c
index e42a5b04ab..d1cc23303a 100644
--- a/hw/misc/zynq_slcr.c
+++ b/hw/misc/zynq_slcr.c
@@ -31,6 +31,8 @@
#define XILINX_LOCK_KEY 0x767b
#define XILINX_UNLOCK_KEY 0xdf0d
+#define R_PSS_RST_CTRL_SOFT_RST 0x1
+
typedef enum {
ARM_PLL_CTRL,
DDR_PLL_CTRL,
@@ -399,6 +401,9 @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
goto bad_reg;
}
s->reset[(offset - 0x200) / 4] = val;
+ if (offset == 0x200 && (val & R_PSS_RST_CTRL_SOFT_RST)) {
+ qemu_system_reset_request();
+ }
break;
case 0x300:
s->apu_ctrl = val;
diff --git a/hw/net/Makefile.objs b/hw/net/Makefile.objs
index 951cca3a4b..75e80c2c48 100644
--- a/hw/net/Makefile.objs
+++ b/hw/net/Makefile.objs
@@ -18,6 +18,7 @@ common-obj-$(CONFIG_OPENCORES_ETH) += opencores_eth.o
common-obj-$(CONFIG_XGMAC) += xgmac.o
common-obj-$(CONFIG_MIPSNET) += mipsnet.o
common-obj-$(CONFIG_XILINX_AXI) += xilinx_axienet.o
+common-obj-$(CONFIG_ALLWINNER_EMAC) += allwinner_emac.o
common-obj-$(CONFIG_CADENCE) += cadence_gem.o
common-obj-$(CONFIG_STELLARIS_ENET) += stellaris_enet.o
diff --git a/hw/net/allwinner_emac.c b/hw/net/allwinner_emac.c
new file mode 100644
index 0000000000..469f2f0ede
--- /dev/null
+++ b/hw/net/allwinner_emac.c
@@ -0,0 +1,539 @@
+/*
+ * Emulation of Allwinner EMAC Fast Ethernet controller and
+ * Realtek RTL8201CP PHY
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This model is based on reverse-engineering of the Linux kernel driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include "hw/sysbus.h"
+#include "net/net.h"
+#include "qemu/fifo8.h"
+#include "hw/net/allwinner_emac.h"
+#include <zlib.h>
+
+static uint8_t padding[60];
+
+static void mii_set_link(RTL8201CPState *mii, bool link_ok)
+{
+ if (link_ok) {
+ mii->bmsr |= MII_BMSR_LINK_ST;
+ mii->anlpar |= MII_ANAR_TXFD | MII_ANAR_10FD | MII_ANAR_10 |
+ MII_ANAR_CSMACD;
+ } else {
+ mii->bmsr &= ~MII_BMSR_LINK_ST;
+ mii->anlpar = MII_ANAR_TX;
+ }
+}
+
+static void mii_reset(RTL8201CPState *mii, bool link_ok)
+{
+ mii->bmcr = MII_BMCR_FD | MII_BMCR_AUTOEN | MII_BMCR_SPEED;
+ mii->bmsr = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD |
+ MII_BMSR_10T_HD | MII_BMSR_MFPS | MII_BMSR_AUTONEG;
+ mii->anar = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD | MII_ANAR_10 |
+ MII_ANAR_CSMACD;
+ mii->anlpar = MII_ANAR_TX;
+
+ mii_set_link(mii, link_ok);
+}
+
+static uint16_t RTL8201CP_mdio_read(AwEmacState *s, uint8_t addr, uint8_t reg)
+{
+ RTL8201CPState *mii = &s->mii;
+ uint16_t ret = 0xffff;
+
+ if (addr == s->phy_addr) {
+ switch (reg) {
+ case MII_BMCR:
+ return mii->bmcr;
+ case MII_BMSR:
+ return mii->bmsr;
+ case MII_PHYID1:
+ return RTL8201CP_PHYID1;
+ case MII_PHYID2:
+ return RTL8201CP_PHYID2;
+ case MII_ANAR:
+ return mii->anar;
+ case MII_ANLPAR:
+ return mii->anlpar;
+ case MII_ANER:
+ case MII_NSR:
+ case MII_LBREMR:
+ case MII_REC:
+ case MII_SNRDR:
+ case MII_TEST:
+ qemu_log_mask(LOG_UNIMP,
+ "allwinner_emac: read from unimpl. mii reg 0x%x\n",
+ reg);
+ return 0;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "allwinner_emac: read from invalid mii reg 0x%x\n",
+ reg);
+ return 0;
+ }
+ }
+ return ret;
+}
+
+static void RTL8201CP_mdio_write(AwEmacState *s, uint8_t addr, uint8_t reg,
+ uint16_t value)
+{
+ RTL8201CPState *mii = &s->mii;
+ NetClientState *nc;
+
+ if (addr == s->phy_addr) {
+ switch (reg) {
+ case MII_BMCR:
+ if (value & MII_BMCR_RESET) {
+ nc = qemu_get_queue(s->nic);
+ mii_reset(mii, !nc->link_down);
+ } else {
+ mii->bmcr = value;
+ }
+ break;
+ case MII_ANAR:
+ mii->anar = value;
+ break;
+ case MII_BMSR:
+ case MII_PHYID1:
+ case MII_PHYID2:
+ case MII_ANLPAR:
+ case MII_ANER:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "allwinner_emac: write to read-only mii reg 0x%x\n",
+ reg);
+ break;
+ case MII_NSR:
+ case MII_LBREMR:
+ case MII_REC:
+ case MII_SNRDR:
+ case MII_TEST:
+ qemu_log_mask(LOG_UNIMP,
+ "allwinner_emac: write to unimpl. mii reg 0x%x\n",
+ reg);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "allwinner_emac: write to invalid mii reg 0x%x\n",
+ reg);
+ }
+ }
+}
+
+static void aw_emac_update_irq(AwEmacState *s)
+{
+ qemu_set_irq(s->irq, (s->int_sta & s->int_ctl) != 0);
+}
+
+static void aw_emac_tx_reset(AwEmacState *s, int chan)
+{
+ fifo8_reset(&s->tx_fifo[chan]);
+ s->tx_length[chan] = 0;
+}
+
+static void aw_emac_rx_reset(AwEmacState *s)
+{
+ fifo8_reset(&s->rx_fifo);
+ s->rx_num_packets = 0;
+ s->rx_packet_size = 0;
+ s->rx_packet_pos = 0;
+}
+
+static void fifo8_push_word(Fifo8 *fifo, uint32_t val)
+{
+ fifo8_push(fifo, val);
+ fifo8_push(fifo, val >> 8);
+ fifo8_push(fifo, val >> 16);
+ fifo8_push(fifo, val >> 24);
+}
+
+static uint32_t fifo8_pop_word(Fifo8 *fifo)
+{
+ uint32_t ret;
+
+ ret = fifo8_pop(fifo);
+ ret |= fifo8_pop(fifo) << 8;
+ ret |= fifo8_pop(fifo) << 16;
+ ret |= fifo8_pop(fifo) << 24;
+
+ return ret;
+}
+
+static int aw_emac_can_receive(NetClientState *nc)
+{
+ AwEmacState *s = qemu_get_nic_opaque(nc);
+
+ /*
+ * To avoid packet drops, allow reception only when there is space
+ * for a full frame: 1522 + 8 (rx headers) + 2 (padding).
+ */
+ return (s->ctl & EMAC_CTL_RX_EN) && (fifo8_num_free(&s->rx_fifo) >= 1532);
+}
+
+static ssize_t aw_emac_receive(NetClientState *nc, const uint8_t *buf,
+ size_t size)
+{
+ AwEmacState *s = qemu_get_nic_opaque(nc);
+ Fifo8 *fifo = &s->rx_fifo;
+ size_t padded_size, total_size;
+ uint32_t crc;
+
+ padded_size = size > 60 ? size : 60;
+ total_size = QEMU_ALIGN_UP(RX_HDR_SIZE + padded_size + CRC_SIZE, 4);
+
+ if (!(s->ctl & EMAC_CTL_RX_EN) || (fifo8_num_free(fifo) < total_size)) {
+ return -1;
+ }
+
+ fifo8_push_word(fifo, EMAC_UNDOCUMENTED_MAGIC);
+ fifo8_push_word(fifo, EMAC_RX_HEADER(padded_size + CRC_SIZE,
+ EMAC_RX_IO_DATA_STATUS_OK));
+ fifo8_push_all(fifo, buf, size);
+ crc = crc32(~0, buf, size);
+
+ if (padded_size != size) {
+ fifo8_push_all(fifo, padding, padded_size - size);
+ crc = crc32(crc, padding, padded_size - size);
+ }
+
+ fifo8_push_word(fifo, crc);
+ fifo8_push_all(fifo, padding, QEMU_ALIGN_UP(padded_size, 4) - padded_size);
+ s->rx_num_packets++;
+
+ s->int_sta |= EMAC_INT_RX;
+ aw_emac_update_irq(s);
+
+ return size;
+}
+
+static void aw_emac_cleanup(NetClientState *nc)
+{
+ AwEmacState *s = qemu_get_nic_opaque(nc);
+
+ s->nic = NULL;
+}
+
+static void aw_emac_reset(DeviceState *dev)
+{
+ AwEmacState *s = AW_EMAC(dev);
+ NetClientState *nc = qemu_get_queue(s->nic);
+
+ s->ctl = 0;
+ s->tx_mode = 0;
+ s->int_ctl = 0;
+ s->int_sta = 0;
+ s->tx_channel = 0;
+ s->phy_target = 0;
+
+ aw_emac_tx_reset(s, 0);
+ aw_emac_tx_reset(s, 1);
+ aw_emac_rx_reset(s);
+
+ mii_reset(&s->mii, !nc->link_down);
+}
+
+static uint64_t aw_emac_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AwEmacState *s = opaque;
+ Fifo8 *fifo = &s->rx_fifo;
+ NetClientState *nc;
+ uint64_t ret;
+
+ switch (offset) {
+ case EMAC_CTL_REG:
+ return s->ctl;
+ case EMAC_TX_MODE_REG:
+ return s->tx_mode;
+ case EMAC_TX_INS_REG:
+ return s->tx_channel;
+ case EMAC_RX_CTL_REG:
+ return s->rx_ctl;
+ case EMAC_RX_IO_DATA_REG:
+ if (!s->rx_num_packets) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Read IO data register when no packet available");
+ return 0;
+ }
+
+ ret = fifo8_pop_word(fifo);
+
+ switch (s->rx_packet_pos) {
+ case 0: /* Word is magic header */
+ s->rx_packet_pos += 4;
+ break;
+ case 4: /* Word is rx info header */
+ s->rx_packet_pos += 4;
+ s->rx_packet_size = QEMU_ALIGN_UP(extract32(ret, 0, 16), 4);
+ break;
+ default: /* Word is packet data */
+ s->rx_packet_pos += 4;
+ s->rx_packet_size -= 4;
+
+ if (!s->rx_packet_size) {
+ s->rx_packet_pos = 0;
+ s->rx_num_packets--;
+ nc = qemu_get_queue(s->nic);
+ if (aw_emac_can_receive(nc)) {
+ qemu_flush_queued_packets(nc);
+ }
+ }
+ }
+ return ret;
+ case EMAC_RX_FBC_REG:
+ return s->rx_num_packets;
+ case EMAC_INT_CTL_REG:
+ return s->int_ctl;
+ case EMAC_INT_STA_REG:
+ return s->int_sta;
+ case EMAC_MAC_MRDD_REG:
+ return RTL8201CP_mdio_read(s,
+ extract32(s->phy_target, PHY_ADDR_SHIFT, 8),
+ extract32(s->phy_target, PHY_REG_SHIFT, 8));
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "allwinner_emac: read access to unknown register 0x"
+ TARGET_FMT_plx "\n", offset);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void aw_emac_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ AwEmacState *s = opaque;
+ Fifo8 *fifo;
+ NetClientState *nc = qemu_get_queue(s->nic);
+ int chan;
+
+ switch (offset) {
+ case EMAC_CTL_REG:
+ if (value & EMAC_CTL_RESET) {
+ aw_emac_reset(DEVICE(s));
+ value &= ~EMAC_CTL_RESET;
+ }
+ s->ctl = value;
+ if (aw_emac_can_receive(nc)) {
+ qemu_flush_queued_packets(nc);
+ }
+ break;
+ case EMAC_TX_MODE_REG:
+ s->tx_mode = value;
+ break;
+ case EMAC_TX_CTL0_REG:
+ case EMAC_TX_CTL1_REG:
+ chan = (offset == EMAC_TX_CTL0_REG ? 0 : 1);
+ if ((value & 1) && (s->ctl & EMAC_CTL_TX_EN)) {
+ uint32_t len, ret;
+ const uint8_t *data;
+
+ fifo = &s->tx_fifo[chan];
+ len = s->tx_length[chan];
+
+ if (len > fifo8_num_used(fifo)) {
+ len = fifo8_num_used(fifo);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "allwinner_emac: TX length > fifo data length\n");
+ }
+ if (len > 0) {
+ data = fifo8_pop_buf(fifo, len, &ret);
+ qemu_send_packet(nc, data, ret);
+ aw_emac_tx_reset(s, chan);
+ /* Raise TX interrupt */
+ s->int_sta |= EMAC_INT_TX_CHAN(chan);
+ aw_emac_update_irq(s);
+ }
+ }
+ break;
+ case EMAC_TX_INS_REG:
+ s->tx_channel = value < NUM_TX_FIFOS ? value : 0;
+ break;
+ case EMAC_TX_PL0_REG:
+ case EMAC_TX_PL1_REG:
+ chan = (offset == EMAC_TX_PL0_REG ? 0 : 1);
+ if (value > TX_FIFO_SIZE) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "allwinner_emac: invalid TX frame length %d\n",
+ (int)value);
+ value = TX_FIFO_SIZE;
+ }
+ s->tx_length[chan] = value;
+ break;
+ case EMAC_TX_IO_DATA_REG:
+ fifo = &s->tx_fifo[s->tx_channel];
+ if (fifo8_num_free(fifo) < 4) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "allwinner_emac: TX data overruns fifo\n");
+ break;
+ }
+ fifo8_push_word(fifo, value);
+ break;
+ case EMAC_RX_CTL_REG:
+ s->rx_ctl = value;
+ break;
+ case EMAC_RX_FBC_REG:
+ if (value == 0) {
+ aw_emac_rx_reset(s);
+ }
+ break;
+ case EMAC_INT_CTL_REG:
+ s->int_ctl = value;
+ break;
+ case EMAC_INT_STA_REG:
+ s->int_sta &= ~value;
+ break;
+ case EMAC_MAC_MADR_REG:
+ s->phy_target = value;
+ break;
+ case EMAC_MAC_MWTD_REG:
+ RTL8201CP_mdio_write(s, extract32(s->phy_target, PHY_ADDR_SHIFT, 8),
+ extract32(s->phy_target, PHY_REG_SHIFT, 8), value);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "allwinner_emac: write access to unknown register 0x"
+ TARGET_FMT_plx "\n", offset);
+ }
+}
+
+static void aw_emac_set_link(NetClientState *nc)
+{
+ AwEmacState *s = qemu_get_nic_opaque(nc);
+
+ mii_set_link(&s->mii, !nc->link_down);
+}
+
+static const MemoryRegionOps aw_emac_mem_ops = {
+ .read = aw_emac_read,
+ .write = aw_emac_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static NetClientInfo net_aw_emac_info = {
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
+ .size = sizeof(NICState),
+ .can_receive = aw_emac_can_receive,
+ .receive = aw_emac_receive,
+ .cleanup = aw_emac_cleanup,
+ .link_status_changed = aw_emac_set_link,
+};
+
+static void aw_emac_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ AwEmacState *s = AW_EMAC(obj);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &aw_emac_mem_ops, s,
+ "aw_emac", 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+ sysbus_init_irq(sbd, &s->irq);
+}
+
+static void aw_emac_realize(DeviceState *dev, Error **errp)
+{
+ AwEmacState *s = AW_EMAC(dev);
+
+ qemu_macaddr_default_if_unset(&s->conf.macaddr);
+ s->nic = qemu_new_nic(&net_aw_emac_info, &s->conf,
+ object_get_typename(OBJECT(dev)), dev->id, s);
+ qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
+
+ fifo8_create(&s->rx_fifo, RX_FIFO_SIZE);
+ fifo8_create(&s->tx_fifo[0], TX_FIFO_SIZE);
+ fifo8_create(&s->tx_fifo[1], TX_FIFO_SIZE);
+}
+
+static Property aw_emac_properties[] = {
+ DEFINE_NIC_PROPERTIES(AwEmacState, conf),
+ DEFINE_PROP_UINT8("phy-addr", AwEmacState, phy_addr, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static const VMStateDescription vmstate_mii = {
+ .name = "rtl8201cp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(bmcr, RTL8201CPState),
+ VMSTATE_UINT16(bmsr, RTL8201CPState),
+ VMSTATE_UINT16(anar, RTL8201CPState),
+ VMSTATE_UINT16(anlpar, RTL8201CPState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int aw_emac_post_load(void *opaque, int version_id)
+{
+ AwEmacState *s = opaque;
+
+ aw_emac_set_link(qemu_get_queue(s->nic));
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_aw_emac = {
+ .name = "allwinner_emac",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = aw_emac_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT(mii, AwEmacState, 1, vmstate_mii, RTL8201CPState),
+ VMSTATE_UINT32(ctl, AwEmacState),
+ VMSTATE_UINT32(tx_mode, AwEmacState),
+ VMSTATE_UINT32(rx_ctl, AwEmacState),
+ VMSTATE_UINT32(int_ctl, AwEmacState),
+ VMSTATE_UINT32(int_sta, AwEmacState),
+ VMSTATE_UINT32(phy_target, AwEmacState),
+ VMSTATE_FIFO8(rx_fifo, AwEmacState),
+ VMSTATE_UINT32(rx_num_packets, AwEmacState),
+ VMSTATE_UINT32(rx_packet_size, AwEmacState),
+ VMSTATE_UINT32(rx_packet_pos, AwEmacState),
+ VMSTATE_STRUCT_ARRAY(tx_fifo, AwEmacState, NUM_TX_FIFOS, 1,
+ vmstate_fifo8, Fifo8),
+ VMSTATE_UINT32_ARRAY(tx_length, AwEmacState, NUM_TX_FIFOS),
+ VMSTATE_UINT32(tx_channel, AwEmacState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void aw_emac_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = aw_emac_realize;
+ dc->props = aw_emac_properties;
+ dc->reset = aw_emac_reset;
+ dc->vmsd = &vmstate_aw_emac;
+}
+
+static const TypeInfo aw_emac_info = {
+ .name = TYPE_AW_EMAC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AwEmacState),
+ .instance_init = aw_emac_init,
+ .class_init = aw_emac_class_init,
+};
+
+static void aw_emac_register_types(void)
+{
+ type_register_static(&aw_emac_info);
+}
+
+type_init(aw_emac_register_types)
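Note: the EMAC model buffers whole frames in byte FIFOs while the guest moves data one 32-bit register access at a time, so fifo8_push_word()/fifo8_pop_word() above pack each word low byte first. A hedged round-trip sketch against the qemu/fifo8.h API (FIFO size hypothetical):

    /* Sketch: 32-bit little-endian round-trip through a Fifo8. */
    Fifo8 f;
    fifo8_create(&f, 16);
    fifo8_push_word(&f, 0xdeadbeef);  /* pushes ef be ad de */
    assert(fifo8_pop_word(&f) == 0xdeadbeef);
    fifo8_destroy(&f);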
diff --git a/hw/net/vmware_utils.h b/hw/net/vmware_utils.h
index 5307e2ccc9..1099df669d 100644
--- a/hw/net/vmware_utils.h
+++ b/hw/net/vmware_utils.h
@@ -65,7 +65,7 @@ vmw_shmem_set(hwaddr addr, uint8 val, int len)
static inline uint32_t
vmw_shmem_ld8(hwaddr addr)
{
- uint8_t res = ldub_phys(addr);
+ uint8_t res = ldub_phys(&address_space_memory, addr);
VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res);
return res;
}
@@ -74,13 +74,13 @@ static inline void
vmw_shmem_st8(hwaddr addr, uint8_t value)
{
VMW_SHPRN("SHMEM store8: %" PRIx64 " (value 0x%X)", addr, value);
- stb_phys(addr, value);
+ stb_phys(&address_space_memory, addr, value);
}
static inline uint32_t
vmw_shmem_ld16(hwaddr addr)
{
- uint16_t res = lduw_le_phys(addr);
+ uint16_t res = lduw_le_phys(&address_space_memory, addr);
VMW_SHPRN("SHMEM load16: %" PRIx64 " (value 0x%X)", addr, res);
return res;
}
@@ -89,13 +89,13 @@ static inline void
vmw_shmem_st16(hwaddr addr, uint16_t value)
{
VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value);
- stw_le_phys(addr, value);
+ stw_le_phys(&address_space_memory, addr, value);
}
static inline uint32_t
vmw_shmem_ld32(hwaddr addr)
{
- uint32_t res = ldl_le_phys(addr);
+ uint32_t res = ldl_le_phys(&address_space_memory, addr);
VMW_SHPRN("SHMEM load32: %" PRIx64 " (value 0x%X)", addr, res);
return res;
}
@@ -104,13 +104,13 @@ static inline void
vmw_shmem_st32(hwaddr addr, uint32_t value)
{
VMW_SHPRN("SHMEM store32: %" PRIx64 " (value 0x%X)", addr, value);
- stl_le_phys(addr, value);
+ stl_le_phys(&address_space_memory, addr, value);
}
static inline uint64_t
vmw_shmem_ld64(hwaddr addr)
{
- uint64_t res = ldq_le_phys(addr);
+ uint64_t res = ldq_le_phys(&address_space_memory, addr);
VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res);
return res;
}
@@ -119,7 +119,7 @@ static inline void
vmw_shmem_st64(hwaddr addr, uint64_t value)
{
VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value);
- stq_le_phys(addr, value);
+ stq_le_phys(&address_space_memory, addr, value);
}
/* Macros for simplification of operations on array-style registers */
diff --git a/hw/pci/msi.c b/hw/pci/msi.c
index 2a04d18884..a4a3040d4d 100644
--- a/hw/pci/msi.c
+++ b/hw/pci/msi.c
@@ -291,7 +291,7 @@ void msi_notify(PCIDevice *dev, unsigned int vector)
"notify vector 0x%x"
" address: 0x%"PRIx64" data: 0x%"PRIx32"\n",
vector, msg.address, msg.data);
- stl_le_phys(msg.address, msg.data);
+ stl_le_phys(&address_space_memory, msg.address, msg.data);
}
/* Normally called by pci_default_write_config(). */
diff --git a/hw/pci/msix.c b/hw/pci/msix.c
index 3430770f33..5c49bfc304 100644
--- a/hw/pci/msix.c
+++ b/hw/pci/msix.c
@@ -439,7 +439,7 @@ void msix_notify(PCIDevice *dev, unsigned vector)
msg = msix_get_message(dev, vector);
- stl_le_phys(msg.address, msg.data);
+ stl_le_phys(&address_space_memory, msg.address, msg.data);
}
void msix_reset(PCIDevice *dev)
diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c
index 8109f92200..ca520e8859 100644
--- a/hw/ppc/ppc405_uc.c
+++ b/hw/ppc/ppc405_uc.c
@@ -44,6 +44,7 @@
ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd,
uint32_t flags)
{
+ CPUState *cs = ENV_GET_CPU(env);
ram_addr_t bdloc;
int i, n;
@@ -52,42 +53,42 @@ ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd,
bdloc = 0x01000000UL - sizeof(struct ppc4xx_bd_info_t);
else
bdloc = bd->bi_memsize - sizeof(struct ppc4xx_bd_info_t);
- stl_be_phys(bdloc + 0x00, bd->bi_memstart);
- stl_be_phys(bdloc + 0x04, bd->bi_memsize);
- stl_be_phys(bdloc + 0x08, bd->bi_flashstart);
- stl_be_phys(bdloc + 0x0C, bd->bi_flashsize);
- stl_be_phys(bdloc + 0x10, bd->bi_flashoffset);
- stl_be_phys(bdloc + 0x14, bd->bi_sramstart);
- stl_be_phys(bdloc + 0x18, bd->bi_sramsize);
- stl_be_phys(bdloc + 0x1C, bd->bi_bootflags);
- stl_be_phys(bdloc + 0x20, bd->bi_ipaddr);
+ stl_be_phys(cs->as, bdloc + 0x00, bd->bi_memstart);
+ stl_be_phys(cs->as, bdloc + 0x04, bd->bi_memsize);
+ stl_be_phys(cs->as, bdloc + 0x08, bd->bi_flashstart);
+ stl_be_phys(cs->as, bdloc + 0x0C, bd->bi_flashsize);
+ stl_be_phys(cs->as, bdloc + 0x10, bd->bi_flashoffset);
+ stl_be_phys(cs->as, bdloc + 0x14, bd->bi_sramstart);
+ stl_be_phys(cs->as, bdloc + 0x18, bd->bi_sramsize);
+ stl_be_phys(cs->as, bdloc + 0x1C, bd->bi_bootflags);
+ stl_be_phys(cs->as, bdloc + 0x20, bd->bi_ipaddr);
for (i = 0; i < 6; i++) {
- stb_phys(bdloc + 0x24 + i, bd->bi_enetaddr[i]);
+ stb_phys(cs->as, bdloc + 0x24 + i, bd->bi_enetaddr[i]);
}
- stw_be_phys(bdloc + 0x2A, bd->bi_ethspeed);
- stl_be_phys(bdloc + 0x2C, bd->bi_intfreq);
- stl_be_phys(bdloc + 0x30, bd->bi_busfreq);
- stl_be_phys(bdloc + 0x34, bd->bi_baudrate);
+ stw_be_phys(cs->as, bdloc + 0x2A, bd->bi_ethspeed);
+ stl_be_phys(cs->as, bdloc + 0x2C, bd->bi_intfreq);
+ stl_be_phys(cs->as, bdloc + 0x30, bd->bi_busfreq);
+ stl_be_phys(cs->as, bdloc + 0x34, bd->bi_baudrate);
for (i = 0; i < 4; i++) {
- stb_phys(bdloc + 0x38 + i, bd->bi_s_version[i]);
+ stb_phys(cs->as, bdloc + 0x38 + i, bd->bi_s_version[i]);
}
for (i = 0; i < 32; i++) {
- stb_phys(bdloc + 0x3C + i, bd->bi_r_version[i]);
+ stb_phys(cs->as, bdloc + 0x3C + i, bd->bi_r_version[i]);
}
- stl_be_phys(bdloc + 0x5C, bd->bi_plb_busfreq);
- stl_be_phys(bdloc + 0x60, bd->bi_pci_busfreq);
+ stl_be_phys(cs->as, bdloc + 0x5C, bd->bi_plb_busfreq);
+ stl_be_phys(cs->as, bdloc + 0x60, bd->bi_pci_busfreq);
for (i = 0; i < 6; i++) {
- stb_phys(bdloc + 0x64 + i, bd->bi_pci_enetaddr[i]);
+ stb_phys(cs->as, bdloc + 0x64 + i, bd->bi_pci_enetaddr[i]);
}
n = 0x6A;
if (flags & 0x00000001) {
for (i = 0; i < 6; i++)
- stb_phys(bdloc + n++, bd->bi_pci_enetaddr2[i]);
+ stb_phys(cs->as, bdloc + n++, bd->bi_pci_enetaddr2[i]);
}
- stl_be_phys(bdloc + n, bd->bi_opbfreq);
+ stl_be_phys(cs->as, bdloc + n, bd->bi_opbfreq);
n += 4;
for (i = 0; i < 2; i++) {
- stl_be_phys(bdloc + n, bd->bi_iic_fast[i]);
+ stl_be_phys(cs->as, bdloc + n, bd->bi_iic_fast[i]);
n += 4;
}
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index f755a53923..3ffcc65f03 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -341,6 +341,7 @@ static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint16_t size;
uint8_t tmp;
@@ -354,7 +355,7 @@ static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
}
/* FIXME: bounds check the address */
- size = lduw_be_phys(vpa + 0x4);
+ size = lduw_be_phys(cs->as, vpa + 0x4);
if (size < VPA_MIN_SIZE) {
return H_PARAMETER;
@@ -367,9 +368,9 @@ static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
env->vpa_addr = vpa;
- tmp = ldub_phys(env->vpa_addr + VPA_SHARED_PROC_OFFSET);
+ tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
tmp |= VPA_SHARED_PROC_VAL;
- stb_phys(env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);
+ stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);
return H_SUCCESS;
}
@@ -390,6 +391,7 @@ static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint32_t size;
if (addr == 0) {
@@ -397,7 +399,7 @@ static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
return H_HARDWARE;
}
- size = ldl_be_phys(addr + 0x4);
+ size = ldl_be_phys(cs->as, addr + 0x4);
if (size < 0x8) {
return H_PARAMETER;
}
@@ -425,6 +427,7 @@ static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint32_t size;
if (addr == 0) {
@@ -432,7 +435,7 @@ static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
return H_HARDWARE;
}
- size = ldl_be_phys(addr + 0x4);
+ size = ldl_be_phys(cs->as, addr + 0x4);
if (size < 48) {
return H_PARAMETER;
@@ -532,21 +535,22 @@ static target_ulong h_rtas(PowerPCCPU *cpu, sPAPREnvironment *spapr,
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,
target_ulong opcode, target_ulong *args)
{
+ CPUState *cs = CPU(cpu);
target_ulong size = args[0];
target_ulong addr = args[1];
switch (size) {
case 1:
- args[0] = ldub_phys(addr);
+ args[0] = ldub_phys(cs->as, addr);
return H_SUCCESS;
case 2:
- args[0] = lduw_phys(addr);
+ args[0] = lduw_phys(cs->as, addr);
return H_SUCCESS;
case 4:
- args[0] = ldl_phys(addr);
+ args[0] = ldl_phys(cs->as, addr);
return H_SUCCESS;
case 8:
- args[0] = ldq_phys(addr);
+ args[0] = ldq_phys(cs->as, addr);
return H_SUCCESS;
}
return H_PARAMETER;
@@ -555,22 +559,24 @@ static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,
static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
target_ulong opcode, target_ulong *args)
{
+ CPUState *cs = CPU(cpu);
+
target_ulong size = args[0];
target_ulong addr = args[1];
target_ulong val = args[2];
switch (size) {
case 1:
- stb_phys(addr, val);
+ stb_phys(cs->as, addr, val);
return H_SUCCESS;
case 2:
- stw_phys(addr, val);
+ stw_phys(cs->as, addr, val);
return H_SUCCESS;
case 4:
- stl_phys(addr, val);
+ stl_phys(cs->as, addr, val);
return H_SUCCESS;
case 8:
- stq_phys(addr, val);
+ stq_phys(cs->as, addr, val);
return H_SUCCESS;
}
return H_PARAMETER;
@@ -579,6 +585,8 @@ static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
target_ulong opcode, target_ulong *args)
{
+ CPUState *cs = CPU(cpu);
+
target_ulong dst = args[0]; /* Destination address */
target_ulong src = args[1]; /* Source address */
target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
@@ -605,16 +613,16 @@ static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
while (count--) {
switch (esize) {
case 0:
- tmp = ldub_phys(src);
+ tmp = ldub_phys(cs->as, src);
break;
case 1:
- tmp = lduw_phys(src);
+ tmp = lduw_phys(cs->as, src);
break;
case 2:
- tmp = ldl_phys(src);
+ tmp = ldl_phys(cs->as, src);
break;
case 3:
- tmp = ldq_phys(src);
+ tmp = ldq_phys(cs->as, src);
break;
default:
return H_PARAMETER;
@@ -624,16 +632,16 @@ static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
}
switch (esize) {
case 0:
- stb_phys(dst, tmp);
+ stb_phys(cs->as, dst, tmp);
break;
case 1:
- stw_phys(dst, tmp);
+ stw_phys(cs->as, dst, tmp);
break;
case 2:
- stl_phys(dst, tmp);
+ stl_phys(cs->as, dst, tmp);
break;
case 3:
- stq_phys(dst, tmp);
+ stq_phys(cs->as, dst, tmp);
break;
}
dst = dst + step;
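Note: h_logical_memop() encodes the element size logarithmically (0=1, 1=2, 2=4, 3=8 bytes) and dispatches to the matching ld*/st*_phys accessor per element. A hedged helper sketch making the width explicit (helper name hypothetical):

    /* Sketch: H_LOGICAL_MEMOP element-size encoding, esize in 0..3. */
    static inline unsigned memop_width(target_ulong esize)
    {
        return 1u << esize;  /* 0 -> 1, 1 -> 2, 2 -> 4, 3 -> 8 bytes */
    }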
diff --git a/hw/s390x/css.c b/hw/s390x/css.c
index 101da63d04..75b04b45af 100644
--- a/hw/s390x/css.c
+++ b/hw/s390x/css.c
@@ -11,6 +11,7 @@
#include <hw/qdev.h>
#include "qemu/bitops.h"
+#include "exec/address-spaces.h"
#include "cpu.h"
#include "ioinst.h"
#include "css.h"
@@ -667,18 +668,20 @@ static void css_update_chnmon(SubchDev *sch)
/* Format 1, per-subchannel area. */
uint32_t count;
- count = ldl_phys(sch->curr_status.mba);
+ count = ldl_phys(&address_space_memory, sch->curr_status.mba);
count++;
- stl_phys(sch->curr_status.mba, count);
+ stl_phys(&address_space_memory, sch->curr_status.mba, count);
} else {
/* Format 0, global area. */
uint32_t offset;
uint16_t count;
offset = sch->curr_status.pmcw.mbi << 5;
- count = lduw_phys(channel_subsys->chnmon_area + offset);
+ count = lduw_phys(&address_space_memory,
+ channel_subsys->chnmon_area + offset);
count++;
- stw_phys(channel_subsys->chnmon_area + offset, count);
+ stw_phys(&address_space_memory,
+ channel_subsys->chnmon_area + offset, count);
}
}
diff --git a/hw/s390x/s390-virtio-bus.c b/hw/s390x/s390-virtio-bus.c
index 46c5ff1898..e4fc35366b 100644
--- a/hw/s390x/s390-virtio-bus.c
+++ b/hw/s390x/s390-virtio-bus.c
@@ -77,10 +77,10 @@ void s390_virtio_reset_idx(VirtIOS390Device *dev)
for (i = 0; i < num_vq; i++) {
idx_addr = virtio_queue_get_avail_addr(dev->vdev, i) +
VIRTIO_VRING_AVAIL_IDX_OFFS;
- stw_phys(idx_addr, 0);
+ stw_phys(&address_space_memory, idx_addr, 0);
idx_addr = virtio_queue_get_used_addr(dev->vdev, i) +
VIRTIO_VRING_USED_IDX_OFFS;
- stw_phys(idx_addr, 0);
+ stw_phys(&address_space_memory, idx_addr, 0);
}
}
@@ -324,7 +324,7 @@ static uint64_t s390_virtio_device_vq_token(VirtIOS390Device *dev, int vq)
(vq * VIRTIO_VQCONFIG_LEN) +
VIRTIO_VQCONFIG_OFFS_TOKEN;
- return ldq_be_phys(token_off);
+ return ldq_be_phys(&address_space_memory, token_off);
}
static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev)
@@ -359,15 +359,21 @@ void s390_virtio_device_sync(VirtIOS390Device *dev)
virtio_reset(dev->vdev);
/* Sync dev space */
- stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_TYPE, dev->vdev->device_id);
+ stb_phys(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_TYPE, dev->vdev->device_id);
- stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, s390_virtio_device_num_vq(dev));
- stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_FEATURE_LEN, dev->feat_len);
+ stb_phys(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ,
+ s390_virtio_device_num_vq(dev));
+ stb_phys(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_FEATURE_LEN, dev->feat_len);
- stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG_LEN, dev->vdev->config_len);
+ stb_phys(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG_LEN, dev->vdev->config_len);
num_vq = s390_virtio_device_num_vq(dev);
- stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, num_vq);
+ stb_phys(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, num_vq);
/* Sync virtqueues */
for (i = 0; i < num_vq; i++) {
@@ -378,8 +384,11 @@ void s390_virtio_device_sync(VirtIOS390Device *dev)
vring = s390_virtio_next_ring(bus);
virtio_queue_set_addr(dev->vdev, i, vring);
virtio_queue_set_vector(dev->vdev, i, i);
- stq_be_phys(vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring);
- stw_be_phys(vq + VIRTIO_VQCONFIG_OFFS_NUM, virtio_queue_get_num(dev->vdev, i));
+ stq_be_phys(&address_space_memory,
+ vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring);
+ stw_be_phys(&address_space_memory,
+ vq + VIRTIO_VQCONFIG_OFFS_NUM,
+ virtio_queue_get_num(dev->vdev, i));
}
cur_offs = dev->dev_offs;
@@ -387,7 +396,7 @@ void s390_virtio_device_sync(VirtIOS390Device *dev)
cur_offs += num_vq * VIRTIO_VQCONFIG_LEN;
/* Sync feature bitmap */
- stl_le_phys(cur_offs, dev->host_features);
+ stl_le_phys(&address_space_memory, cur_offs, dev->host_features);
dev->feat_offs = cur_offs + dev->feat_len;
cur_offs += dev->feat_len * 2;
@@ -405,11 +414,12 @@ void s390_virtio_device_update_status(VirtIOS390Device *dev)
VirtIODevice *vdev = dev->vdev;
uint32_t features;
- virtio_set_status(vdev, ldub_phys(dev->dev_offs + VIRTIO_DEV_OFFS_STATUS));
+ virtio_set_status(vdev, ldub_phys(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_STATUS));
/* Update guest supported feature bitmap */
- features = bswap32(ldl_be_phys(dev->feat_offs));
+ features = bswap32(ldl_be_phys(&address_space_memory, dev->feat_offs));
virtio_set_features(vdev, features);
}
diff --git a/hw/s390x/s390-virtio.c b/hw/s390x/s390-virtio.c
index 7adf92af51..9eeda97920 100644
--- a/hw/s390x/s390-virtio.c
+++ b/hw/s390x/s390-virtio.c
@@ -91,7 +91,7 @@ static int s390_virtio_hcall_reset(const uint64_t *args)
return -EINVAL;
}
virtio_reset(dev->vdev);
- stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_STATUS, 0);
+ stb_phys(&address_space_memory, dev->dev_offs + VIRTIO_DEV_OFFS_STATUS, 0);
s390_virtio_device_sync(dev);
s390_virtio_reset_idx(dev);
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index bc8871249d..f6e0e3e4ae 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -262,11 +262,14 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
- info.queue = ldq_phys(ccw.cda);
- info.align = ldl_phys(ccw.cda + sizeof(info.queue));
- info.index = lduw_phys(ccw.cda + sizeof(info.queue)
+ info.queue = ldq_phys(&address_space_memory, ccw.cda);
+ info.align = ldl_phys(&address_space_memory,
+ ccw.cda + sizeof(info.queue));
+ info.index = lduw_phys(&address_space_memory,
+ ccw.cda + sizeof(info.queue)
+ sizeof(info.align));
- info.num = lduw_phys(ccw.cda + sizeof(info.queue)
+ info.num = lduw_phys(&address_space_memory,
+ ccw.cda + sizeof(info.queue)
+ sizeof(info.align)
+ sizeof(info.index));
ret = virtio_ccw_set_vqs(sch, info.queue, info.align, info.index,
@@ -293,14 +296,15 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
- features.index = ldub_phys(ccw.cda + sizeof(features.features));
+ features.index = ldub_phys(&address_space_memory,
+ ccw.cda + sizeof(features.features));
if (features.index < ARRAY_SIZE(dev->host_features)) {
features.features = dev->host_features[features.index];
} else {
/* Return zeroes if the guest supports more feature bits. */
features.features = 0;
}
- stl_le_phys(ccw.cda, features.features);
+ stl_le_phys(&address_space_memory, ccw.cda, features.features);
sch->curr_status.scsw.count = ccw.count - sizeof(features);
ret = 0;
}
@@ -319,8 +323,9 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
- features.index = ldub_phys(ccw.cda + sizeof(features.features));
- features.features = ldl_le_phys(ccw.cda);
+ features.index = ldub_phys(&address_space_memory,
+ ccw.cda + sizeof(features.features));
+ features.features = ldl_le_phys(&address_space_memory, ccw.cda);
if (features.index < ARRAY_SIZE(dev->host_features)) {
virtio_bus_set_vdev_features(&dev->bus, features.features);
vdev->guest_features = features.features;
@@ -397,7 +402,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
- status = ldub_phys(ccw.cda);
+ status = ldub_phys(&address_space_memory, ccw.cda);
if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
virtio_ccw_stop_ioeventfd(dev);
}
@@ -426,7 +431,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
- indicators = ldq_phys(ccw.cda);
+ indicators = ldq_phys(&address_space_memory, ccw.cda);
dev->indicators = indicators;
sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
ret = 0;
@@ -446,7 +451,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
- indicators = ldq_phys(ccw.cda);
+ indicators = ldq_phys(&address_space_memory, ccw.cda);
dev->indicators2 = indicators;
sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
ret = 0;
@@ -466,10 +471,11 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
- vq_config.index = lduw_phys(ccw.cda);
+ vq_config.index = lduw_phys(&address_space_memory, ccw.cda);
vq_config.num_max = virtio_queue_get_num(vdev,
vq_config.index);
- stw_phys(ccw.cda + sizeof(vq_config.index), vq_config.num_max);
+ stw_phys(&address_space_memory,
+ ccw.cda + sizeof(vq_config.index), vq_config.num_max);
sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
ret = 0;
}
@@ -866,17 +872,17 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
if (!dev->indicators) {
return;
}
- indicators = ldq_phys(dev->indicators);
+ indicators = ldq_phys(&address_space_memory, dev->indicators);
indicators |= 1ULL << vector;
- stq_phys(dev->indicators, indicators);
+ stq_phys(&address_space_memory, dev->indicators, indicators);
} else {
if (!dev->indicators2) {
return;
}
vector = 0;
- indicators = ldq_phys(dev->indicators2);
+ indicators = ldq_phys(&address_space_memory, dev->indicators2);
indicators |= 1ULL << vector;
- stq_phys(dev->indicators2, indicators);
+ stq_phys(&address_space_memory, dev->indicators2, indicators);
}
css_conditional_io_interrupt(sch);
diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
index 7c5a1a2b3a..59570e2b2b 100644
--- a/hw/scsi/megasas.c
+++ b/hw/scsi/megasas.c
@@ -144,12 +144,14 @@ static bool megasas_is_jbod(MegasasState *s)
static void megasas_frame_set_cmd_status(unsigned long frame, uint8_t v)
{
- stb_phys(frame + offsetof(struct mfi_frame_header, cmd_status), v);
+ stb_phys(&address_space_memory,
+ frame + offsetof(struct mfi_frame_header, cmd_status), v);
}
static void megasas_frame_set_scsi_status(unsigned long frame, uint8_t v)
{
- stb_phys(frame + offsetof(struct mfi_frame_header, scsi_status), v);
+ stb_phys(&address_space_memory,
+ frame + offsetof(struct mfi_frame_header, scsi_status), v);
}
/*
@@ -158,7 +160,8 @@ static void megasas_frame_set_scsi_status(unsigned long frame, uint8_t v)
*/
static uint64_t megasas_frame_get_context(unsigned long frame)
{
- return ldq_le_phys(frame + offsetof(struct mfi_frame_header, context));
+ return ldq_le_phys(&address_space_memory,
+ frame + offsetof(struct mfi_frame_header, context));
}
static bool megasas_frame_is_ieee_sgl(MegasasCmd *cmd)
@@ -516,10 +519,12 @@ static void megasas_complete_frame(MegasasState *s, uint64_t context)
tail = s->reply_queue_head;
if (megasas_use_queue64(s)) {
queue_offset = tail * sizeof(uint64_t);
- stq_le_phys(s->reply_queue_pa + queue_offset, context);
+ stq_le_phys(&address_space_memory,
+ s->reply_queue_pa + queue_offset, context);
} else {
queue_offset = tail * sizeof(uint32_t);
- stl_le_phys(s->reply_queue_pa + queue_offset, context);
+ stl_le_phys(&address_space_memory,
+ s->reply_queue_pa + queue_offset, context);
}
s->reply_queue_head = megasas_next_index(s, tail, s->fw_cmds);
trace_megasas_qf_complete(context, tail, queue_offset,
@@ -602,8 +607,8 @@ static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd)
pa_lo = le32_to_cpu(initq->pi_addr_lo);
pa_hi = le32_to_cpu(initq->pi_addr_hi);
s->producer_pa = ((uint64_t) pa_hi << 32) | pa_lo;
- s->reply_queue_head = ldl_le_phys(s->producer_pa);
- s->reply_queue_tail = ldl_le_phys(s->consumer_pa);
+ s->reply_queue_head = ldl_le_phys(&address_space_memory, s->producer_pa);
+ s->reply_queue_tail = ldl_le_phys(&address_space_memory, s->consumer_pa);
flags = le32_to_cpu(initq->flags);
if (flags & MFI_QUEUE_FLAG_CONTEXT64) {
s->flags |= MEGASAS_MASK_USE_QUEUE64;
@@ -1949,7 +1954,8 @@ static void megasas_mmio_write(void *opaque, hwaddr addr,
if (s->producer_pa && megasas_intr_enabled(s)) {
/* Update reply queue pointer */
trace_megasas_qf_update(s->reply_queue_head, s->busy);
- stl_le_phys(s->producer_pa, s->reply_queue_head);
+ stl_le_phys(&address_space_memory,
+ s->producer_pa, s->reply_queue_head);
if (!msix_enabled(pci_dev)) {
trace_megasas_irq_lower();
pci_irq_deassert(pci_dev);
diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
index 94b328f186..7d344b944e 100644
--- a/hw/scsi/vmw_pvscsi.c
+++ b/hw/scsi/vmw_pvscsi.c
@@ -43,9 +43,11 @@
(sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t))
#define RS_GET_FIELD(rs_pa, field) \
- (ldl_le_phys(rs_pa + offsetof(struct PVSCSIRingsState, field)))
+ (ldl_le_phys(&address_space_memory, \
+ rs_pa + offsetof(struct PVSCSIRingsState, field)))
#define RS_SET_FIELD(rs_pa, field, val) \
- (stl_le_phys(rs_pa + offsetof(struct PVSCSIRingsState, field), val))
+ (stl_le_phys(&address_space_memory, \
+ rs_pa + offsetof(struct PVSCSIRingsState, field), val))
#define TYPE_PVSCSI "pvscsi"
#define PVSCSI(obj) OBJECT_CHECK(PVSCSIState, (obj), TYPE_PVSCSI)
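With the address space folded into the macros, rings-state accesses stay one-liners at the call sites. A sketch of their use (pvscsi_ring_advance_req is hypothetical; reqProdIdx stands in for any 32-bit member of struct PVSCSIRingsState):

    static void pvscsi_ring_advance_req(hwaddr rings_pa)
    {
        /* Read a producer index from the guest-resident rings state
         * and write back the incremented value. */
        uint32_t idx = RS_GET_FIELD(rings_pa, reqProdIdx);
        RS_SET_FIELD(rings_pa, reqProdIdx, idx + 1);
    }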
diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c
index 7b1de85835..eaeb7ede4e 100644
--- a/hw/sh4/r2d.c
+++ b/hw/sh4/r2d.c
@@ -318,8 +318,8 @@ static void r2d_init(QEMUMachineInitArgs *args)
}
/* initialization which should be done by firmware */
- stl_phys(SH7750_BCR1, 1<<3); /* cs3 SDRAM */
- stw_phys(SH7750_BCR2, 3<<(3*2)); /* cs3 32bit */
+ stl_phys(&address_space_memory, SH7750_BCR1, 1<<3); /* cs3 SDRAM */
+ stw_phys(&address_space_memory, SH7750_BCR2, 3<<(3*2)); /* cs3 32bit */
reset_info->vector = (SDRAM_BASE + LINUX_LOAD_OFFSET) | 0xa0000000; /* Start from P2 area */
}
diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c
index 94f79508d8..2957d90177 100644
--- a/hw/sparc/sun4m.c
+++ b/hw/sparc/sun4m.c
@@ -577,7 +577,8 @@ static void idreg_init(hwaddr addr)
s = SYS_BUS_DEVICE(dev);
sysbus_mmio_map(s, 0, addr);
- cpu_physical_memory_write_rom(addr, idreg_data, sizeof(idreg_data));
+ cpu_physical_memory_write_rom(&address_space_memory,
+ addr, idreg_data, sizeof(idreg_data));
}
#define MACIO_ID_REGISTER(obj) \
diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c
index 2fbbeb1735..1264dfd46a 100644
--- a/hw/timer/hpet.c
+++ b/hw/timer/hpet.c
@@ -206,7 +206,8 @@ static void update_irq(struct HPETTimer *timer, int set)
}
}
} else if (timer_fsb_route(timer)) {
- stl_le_phys(timer->fsb >> 32, timer->fsb & 0xffffffff);
+ stl_le_phys(&address_space_memory,
+ timer->fsb >> 32, timer->fsb & 0xffffffff);
} else if (timer->config & HPET_TN_TYPE_LEVEL) {
s->isr |= mask;
/* fold the ICH PIRQ# pin's internal inversion logic into hpet */
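The single store above hides the FSB message layout: timer->fsb packs the doorbell address in its high 32 bits and the payload in its low 32 bits. Unpacked, the delivery is equivalent to this sketch:

    uint64_t fsb      = timer->fsb;
    hwaddr   msg_addr = fsb >> 32;         /* MSI-style target address */
    uint32_t msg_data = fsb & 0xffffffff;  /* 32-bit payload */
    stl_le_phys(&address_space_memory, msg_addr, msg_data);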
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index a001e668c4..aeabf3a459 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -14,6 +14,7 @@
#include <inttypes.h>
#include "trace.h"
+#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
@@ -104,49 +105,49 @@ static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
- return ldq_phys(pa);
+ return ldq_phys(&address_space_memory, pa);
}
static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
- return ldl_phys(pa);
+ return ldl_phys(&address_space_memory, pa);
}
static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
- return lduw_phys(pa);
+ return lduw_phys(&address_space_memory, pa);
}
static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
- return lduw_phys(pa);
+ return lduw_phys(&address_space_memory, pa);
}
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
hwaddr pa;
pa = vq->vring.avail + offsetof(VRingAvail, flags);
- return lduw_phys(pa);
+ return lduw_phys(&address_space_memory, pa);
}
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
hwaddr pa;
pa = vq->vring.avail + offsetof(VRingAvail, idx);
- return lduw_phys(pa);
+ return lduw_phys(&address_space_memory, pa);
}
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
hwaddr pa;
pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
- return lduw_phys(pa);
+ return lduw_phys(&address_space_memory, pa);
}
static inline uint16_t vring_used_event(VirtQueue *vq)
@@ -158,42 +159,44 @@ static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
- stl_phys(pa, val);
+ stl_phys(&address_space_memory, pa, val);
}
static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
- stl_phys(pa, val);
+ stl_phys(&address_space_memory, pa, val);
}
static uint16_t vring_used_idx(VirtQueue *vq)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, idx);
- return lduw_phys(pa);
+ return lduw_phys(&address_space_memory, pa);
}
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, idx);
- stw_phys(pa, val);
+ stw_phys(&address_space_memory, pa, val);
}
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, flags);
- stw_phys(pa, lduw_phys(pa) | mask);
+ stw_phys(&address_space_memory,
+ pa, lduw_phys(&address_space_memory, pa) | mask);
}
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, flags);
- stw_phys(pa, lduw_phys(pa) & ~mask);
+ stw_phys(&address_space_memory,
+ pa, lduw_phys(&address_space_memory, pa) & ~mask);
}
static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
@@ -203,7 +206,7 @@ static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
return;
}
pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
- stw_phys(pa, val);
+ stw_phys(&address_space_memory, pa, val);
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
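Every vring accessor above follows one pattern: compute the guest-physical address of a field with offsetof, then load or store it through address_space_memory. A sketch composing them to fetch a whole descriptor (vring_read_desc is a hypothetical wrapper):

    static void vring_read_desc(hwaddr desc_pa, int i, uint64_t *addr,
                                uint32_t *len, uint16_t *flags, uint16_t *next)
    {
        *addr  = vring_desc_addr(desc_pa, i);
        *len   = vring_desc_len(desc_pa, i);
        *flags = vring_desc_flags(desc_pa, i);
        *next  = vring_desc_next(desc_pa, i);
    }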
diff --git a/include/disas/bfd.h b/include/disas/bfd.h
index 803b6efe41..8bd703cb1a 100644
--- a/include/disas/bfd.h
+++ b/include/disas/bfd.h
@@ -379,6 +379,7 @@ int print_insn_h8300 (bfd_vma, disassemble_info*);
int print_insn_h8300h (bfd_vma, disassemble_info*);
int print_insn_h8300s (bfd_vma, disassemble_info*);
int print_insn_h8500 (bfd_vma, disassemble_info*);
+int print_insn_arm_a64 (bfd_vma, disassemble_info*);
int print_insn_alpha (bfd_vma, disassemble_info*);
disassembler_ftype arc_get_disassembler (int, int);
int print_insn_arm (bfd_vma, disassemble_info*);
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 8f33122c9f..a21b65a893 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -83,32 +83,32 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr);
*/
void qemu_flush_coalesced_mmio_buffer(void);
-uint32_t ldub_phys(hwaddr addr);
-uint32_t lduw_le_phys(hwaddr addr);
-uint32_t lduw_be_phys(hwaddr addr);
-uint32_t ldl_le_phys(hwaddr addr);
-uint32_t ldl_be_phys(hwaddr addr);
-uint64_t ldq_le_phys(hwaddr addr);
-uint64_t ldq_be_phys(hwaddr addr);
-void stb_phys(hwaddr addr, uint32_t val);
-void stw_le_phys(hwaddr addr, uint32_t val);
-void stw_be_phys(hwaddr addr, uint32_t val);
-void stl_le_phys(hwaddr addr, uint32_t val);
-void stl_be_phys(hwaddr addr, uint32_t val);
-void stq_le_phys(hwaddr addr, uint64_t val);
-void stq_be_phys(hwaddr addr, uint64_t val);
+uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
+uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
+uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
+uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
+uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
+uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
+uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
+void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
+void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
#ifdef NEED_CPU_H
-uint32_t lduw_phys(hwaddr addr);
-uint32_t ldl_phys(hwaddr addr);
-uint64_t ldq_phys(hwaddr addr);
-void stl_phys_notdirty(hwaddr addr, uint32_t val);
-void stw_phys(hwaddr addr, uint32_t val);
-void stl_phys(hwaddr addr, uint32_t val);
-void stq_phys(hwaddr addr, uint64_t val);
+uint32_t lduw_phys(AddressSpace *as, hwaddr addr);
+uint32_t ldl_phys(AddressSpace *as, hwaddr addr);
+uint64_t ldq_phys(AddressSpace *as, hwaddr addr);
+void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val);
+void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val);
#endif
-void cpu_physical_memory_write_rom(hwaddr addr,
+void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
const uint8_t *buf, int len);
void cpu_flush_icache_range(hwaddr start, int len);
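For callers, the conversion is mechanical; a before/after sketch (cs is assumed to be a CPUState *, and the unsuffixed variants are only visible under NEED_CPU_H):

    uint32_t v;
    v = ldl_le_phys(addr);                         /* old API */
    v = ldl_le_phys(&address_space_memory, addr);  /* new: system memory */
    v = ldl_phys(cs->as, addr);                    /* new: this CPU's view */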
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 3b03cbfcf8..a387922df4 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -96,13 +96,14 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
+void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size);
-void tb_invalidate_phys_addr(hwaddr addr);
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
@@ -325,7 +326,7 @@ extern uintptr_t tci_tb_ptr;
void phys_mem_set_alloc(void *(*alloc)(size_t));
-struct MemoryRegion *iotlb_to_region(hwaddr index);
+struct MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 296d6ab2f4..9101fc3a55 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -163,8 +163,6 @@ struct MemoryRegion {
NotifierList iommu_notify;
};
-typedef struct MemoryListener MemoryListener;
-
/**
* MemoryListener: callbacks structure for updates to the physical memory map
*
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
index 8712dcd091..c14a04d7e9 100644
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -22,6 +22,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/timer.h"
+#include "exec/address-spaces.h"
#include "exec/memory.h"
#define DATA_SIZE (1 << SHIFT)
@@ -121,7 +122,8 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
uintptr_t retaddr)
{
uint64_t val;
- MemoryRegion *mr = iotlb_to_region(physaddr);
+ CPUState *cpu = ENV_GET_CPU(env);
+ MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
env->mem_io_pc = retaddr;
@@ -327,7 +329,8 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
target_ulong addr,
uintptr_t retaddr)
{
- MemoryRegion *mr = iotlb_to_region(physaddr);
+ CPUState *cpu = ENV_GET_CPU(env);
+ MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h
index da36647f32..01a189bcdc 100644
--- a/include/hw/arm/allwinner-a10.h
+++ b/include/hw/arm/allwinner-a10.h
@@ -6,6 +6,7 @@
#include "hw/arm/arm.h"
#include "hw/timer/allwinner-a10-pit.h"
#include "hw/intc/allwinner-a10-pic.h"
+#include "hw/net/allwinner_emac.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
@@ -14,6 +15,7 @@
#define AW_A10_PIC_REG_BASE 0x01c20400
#define AW_A10_PIT_REG_BASE 0x01c20c00
#define AW_A10_UART0_REG_BASE 0x01c28000
+#define AW_A10_EMAC_BASE 0x01c0b000
#define AW_A10_SDRAM_BASE 0x40000000
@@ -29,6 +31,7 @@ typedef struct AwA10State {
qemu_irq irq[AW_A10_PIC_INT_NR];
AwA10PITState timer;
AwA10PICState intc;
+ AwEmacState emac;
} AwA10State;
#define ALLWINNER_H_
diff --git a/include/hw/intc/arm_gic_common.h b/include/hw/intc/arm_gic_common.h
index 8a2aa00cee..89384c2bb4 100644
--- a/include/hw/intc/arm_gic_common.h
+++ b/include/hw/intc/arm_gic_common.h
@@ -31,6 +31,9 @@
/* Maximum number of possible CPU interfaces, determined by GIC architecture */
#define GIC_NCPU 8
+#define MAX_NR_GROUP_PRIO 128
+#define GIC_NR_APRS (MAX_NR_GROUP_PRIO / 32)
+
typedef struct gic_irq_state {
/* The enable bits are only banked for per-cpu interrupts. */
uint8_t enabled;
@@ -55,12 +58,42 @@ typedef struct GICState {
uint8_t priority1[GIC_INTERNAL][GIC_NCPU];
uint8_t priority2[GIC_MAXIRQ - GIC_INTERNAL];
uint16_t last_active[GIC_MAXIRQ][GIC_NCPU];
+ /* For each SGI on the target CPU, we store 8 bits
+ * indicating which source CPUs have made this SGI
+ * pending on the target CPU. These correspond to
+ * the bytes in the GIC_SPENDSGIR* registers as
+ * read by the target CPU.
+ */
+ uint8_t sgi_pending[GIC_NR_SGIS][GIC_NCPU];
uint16_t priority_mask[GIC_NCPU];
uint16_t running_irq[GIC_NCPU];
uint16_t running_priority[GIC_NCPU];
uint16_t current_pending[GIC_NCPU];
+ /* We present the GICv2 without security extensions to a guest and
+ * therefore the guest can program the GICC_CTLR to take the group 1
+ * binary point from the abpr.
+ */
+ uint8_t bpr[GIC_NCPU];
+ uint8_t abpr[GIC_NCPU];
+
+ /* The APR is implementation defined, so we choose a layout identical to
+ * the KVM ABI layout for QEMU's implementation of the gic:
+ * If an interrupt for preemption level X is active, then
+ * APRn[X mod 32] == 0b1, where n = X / 32
+ * otherwise the bit is clear.
+ *
+ * TODO: rewrite the interrupt acknowledge/complete routines to use
+ * the APR registers to track the necessary information to update
+ * s->running_priority[] on interrupt completion (i.e. completely remove
+ * last_active[][] and running_irq[]). This will be necessary if we ever
+ * want to support TCG<->KVM migration, or TCG guests which can
+ * do power management involving powering down and restarting
+ * the GIC.
+ */
+ uint32_t apr[GIC_NR_APRS][GIC_NCPU];
+
uint32_t num_cpu;
MemoryRegion iomem; /* Distributor */
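In code, the APR layout described in the comment reduces to a divide and a modulo. A sketch (both helper names are hypothetical):

    static void gic_set_apr_bit(GICState *s, int cpu, int level)
    {
        /* APRn[level mod 32] marks preemption level `level` active */
        s->apr[level / 32][cpu] |= 1u << (level % 32);
    }

    static bool gic_test_apr_bit(GICState *s, int cpu, int level)
    {
        return s->apr[level / 32][cpu] & (1u << (level % 32));
    }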
diff --git a/include/hw/net/allwinner_emac.h b/include/hw/net/allwinner_emac.h
new file mode 100644
index 0000000000..a5e944af05
--- /dev/null
+++ b/include/hw/net/allwinner_emac.h
@@ -0,0 +1,210 @@
+/*
+ * Emulation of Allwinner EMAC Fast Ethernet controller and
+ * Realtek RTL8201CP PHY
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * Allwinner EMAC register definitions from Linux kernel are:
+ * Copyright 2012 Stefan Roese <sr@denx.de>
+ * Copyright 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright 1997 Sten Wang
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef AW_EMAC_H
+#define AW_EMAC_H
+
+#include "net/net.h"
+#include "qemu/fifo8.h"
+
+#define TYPE_AW_EMAC "allwinner-emac"
+#define AW_EMAC(obj) OBJECT_CHECK(AwEmacState, (obj), TYPE_AW_EMAC)
+
+/*
+ * Allwinner EMAC register list
+ */
+#define EMAC_CTL_REG 0x00
+
+#define EMAC_TX_MODE_REG 0x04
+#define EMAC_TX_FLOW_REG 0x08
+#define EMAC_TX_CTL0_REG 0x0C
+#define EMAC_TX_CTL1_REG 0x10
+#define EMAC_TX_INS_REG 0x14
+#define EMAC_TX_PL0_REG 0x18
+#define EMAC_TX_PL1_REG 0x1C
+#define EMAC_TX_STA_REG 0x20
+#define EMAC_TX_IO_DATA_REG 0x24
+#define EMAC_TX_IO_DATA1_REG 0x28
+#define EMAC_TX_TSVL0_REG 0x2C
+#define EMAC_TX_TSVH0_REG 0x30
+#define EMAC_TX_TSVL1_REG 0x34
+#define EMAC_TX_TSVH1_REG 0x38
+
+#define EMAC_RX_CTL_REG 0x3C
+#define EMAC_RX_HASH0_REG 0x40
+#define EMAC_RX_HASH1_REG 0x44
+#define EMAC_RX_STA_REG 0x48
+#define EMAC_RX_IO_DATA_REG 0x4C
+#define EMAC_RX_FBC_REG 0x50
+
+#define EMAC_INT_CTL_REG 0x54
+#define EMAC_INT_STA_REG 0x58
+
+#define EMAC_MAC_CTL0_REG 0x5C
+#define EMAC_MAC_CTL1_REG 0x60
+#define EMAC_MAC_IPGT_REG 0x64
+#define EMAC_MAC_IPGR_REG 0x68
+#define EMAC_MAC_CLRT_REG 0x6C
+#define EMAC_MAC_MAXF_REG 0x70
+#define EMAC_MAC_SUPP_REG 0x74
+#define EMAC_MAC_TEST_REG 0x78
+#define EMAC_MAC_MCFG_REG 0x7C
+#define EMAC_MAC_MCMD_REG 0x80
+#define EMAC_MAC_MADR_REG 0x84
+#define EMAC_MAC_MWTD_REG 0x88
+#define EMAC_MAC_MRDD_REG 0x8C
+#define EMAC_MAC_MIND_REG 0x90
+#define EMAC_MAC_SSRR_REG 0x94
+#define EMAC_MAC_A0_REG 0x98
+#define EMAC_MAC_A1_REG 0x9C
+#define EMAC_MAC_A2_REG 0xA0
+
+#define EMAC_SAFX_L_REG0 0xA4
+#define EMAC_SAFX_H_REG0 0xA8
+#define EMAC_SAFX_L_REG1 0xAC
+#define EMAC_SAFX_H_REG1 0xB0
+#define EMAC_SAFX_L_REG2 0xB4
+#define EMAC_SAFX_H_REG2 0xB8
+#define EMAC_SAFX_L_REG3 0xBC
+#define EMAC_SAFX_H_REG3 0xC0
+
+/* CTL register fields */
+#define EMAC_CTL_RESET (1 << 0)
+#define EMAC_CTL_TX_EN (1 << 1)
+#define EMAC_CTL_RX_EN (1 << 2)
+
+/* TX MODE register fields */
+#define EMAC_TX_MODE_ABORTED_FRAME_EN (1 << 0)
+#define EMAC_TX_MODE_DMA_EN (1 << 1)
+
+/* RX CTL register fields */
+#define EMAC_RX_CTL_AUTO_DRQ_EN (1 << 1)
+#define EMAC_RX_CTL_DMA_EN (1 << 2)
+#define EMAC_RX_CTL_PASS_ALL_EN (1 << 4)
+#define EMAC_RX_CTL_PASS_CTL_EN (1 << 5)
+#define EMAC_RX_CTL_PASS_CRC_ERR_EN (1 << 6)
+#define EMAC_RX_CTL_PASS_LEN_ERR_EN (1 << 7)
+#define EMAC_RX_CTL_PASS_LEN_OOR_EN (1 << 8)
+#define EMAC_RX_CTL_ACCEPT_UNICAST_EN (1 << 16)
+#define EMAC_RX_CTL_DA_FILTER_EN (1 << 17)
+#define EMAC_RX_CTL_ACCEPT_MULTICAST_EN (1 << 20)
+#define EMAC_RX_CTL_HASH_FILTER_EN (1 << 21)
+#define EMAC_RX_CTL_ACCEPT_BROADCAST_EN (1 << 22)
+#define EMAC_RX_CTL_SA_FILTER_EN (1 << 24)
+#define EMAC_RX_CTL_SA_FILTER_INVERT_EN (1 << 25)
+
+/* RX IO DATA register fields */
+#define EMAC_RX_HEADER(len, status) (((len) & 0xffff) | ((status) << 16))
+#define EMAC_RX_IO_DATA_STATUS_CRC_ERR (1 << 4)
+#define EMAC_RX_IO_DATA_STATUS_LEN_ERR (3 << 5)
+#define EMAC_RX_IO_DATA_STATUS_OK (1 << 7)
+#define EMAC_UNDOCUMENTED_MAGIC 0x0143414d /* header for RX frames */
+
+/* PHY registers */
+#define MII_BMCR 0
+#define MII_BMSR 1
+#define MII_PHYID1 2
+#define MII_PHYID2 3
+#define MII_ANAR 4
+#define MII_ANLPAR 5
+#define MII_ANER 6
+#define MII_NSR 16
+#define MII_LBREMR 17
+#define MII_REC 18
+#define MII_SNRDR 19
+#define MII_TEST 25
+
+/* PHY registers fields */
+#define MII_BMCR_RESET (1 << 15)
+#define MII_BMCR_LOOPBACK (1 << 14)
+#define MII_BMCR_SPEED (1 << 13)
+#define MII_BMCR_AUTOEN (1 << 12)
+#define MII_BMCR_FD (1 << 8)
+
+#define MII_BMSR_100TX_FD (1 << 14)
+#define MII_BMSR_100TX_HD (1 << 13)
+#define MII_BMSR_10T_FD (1 << 12)
+#define MII_BMSR_10T_HD (1 << 11)
+#define MII_BMSR_MFPS (1 << 6)
+#define MII_BMSR_AUTONEG (1 << 3)
+#define MII_BMSR_LINK_ST (1 << 2)
+
+#define MII_ANAR_TXFD (1 << 8)
+#define MII_ANAR_TX (1 << 7)
+#define MII_ANAR_10FD (1 << 6)
+#define MII_ANAR_10 (1 << 5)
+#define MII_ANAR_CSMACD (1 << 0)
+
+#define RTL8201CP_PHYID1 0x0000
+#define RTL8201CP_PHYID2 0x8201
+
+/* INT CTL and INT STA registers fields */
+#define EMAC_INT_TX_CHAN(x) (1 << (x))
+#define EMAC_INT_RX (1 << 8)
+
+/* Due to lack of specifications, the FIFO sizes are chosen arbitrarily */
+#define TX_FIFO_SIZE (4 * 1024)
+#define RX_FIFO_SIZE (32 * 1024)
+
+#define NUM_TX_FIFOS 2
+#define RX_HDR_SIZE 8
+#define CRC_SIZE 4
+
+#define PHY_REG_SHIFT 0
+#define PHY_ADDR_SHIFT 8
+
+typedef struct RTL8201CPState {
+ uint16_t bmcr;
+ uint16_t bmsr;
+ uint16_t anar;
+ uint16_t anlpar;
+} RTL8201CPState;
+
+typedef struct AwEmacState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+ NICState *nic;
+ NICConf conf;
+ RTL8201CPState mii;
+ uint8_t phy_addr;
+
+ uint32_t ctl;
+ uint32_t tx_mode;
+ uint32_t rx_ctl;
+ uint32_t int_ctl;
+ uint32_t int_sta;
+ uint32_t phy_target;
+
+ Fifo8 rx_fifo;
+ uint32_t rx_num_packets;
+ uint32_t rx_packet_size;
+ uint32_t rx_packet_pos;
+
+ Fifo8 tx_fifo[NUM_TX_FIFOS];
+ uint32_t tx_length[NUM_TX_FIFOS];
+ uint32_t tx_channel;
+} AwEmacState;
+
+#endif
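Two of the definitions above benefit from a worked example: phy_target packs the PHY address and register number at the shifts given (field widths assumed to be 8 bits here), and EMAC_RX_HEADER builds the status word prepended to received frames:

    uint32_t tgt      = s->phy_target;
    uint8_t  phy_addr = (tgt >> PHY_ADDR_SHIFT) & 0xff;
    uint8_t  phy_reg  = (tgt >> PHY_REG_SHIFT) & 0xff;

    /* header for a 100-byte frame received without errors */
    uint32_t hdr = EMAC_RX_HEADER(100, EMAC_RX_IO_DATA_STATUS_OK);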
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index b2f11e9a2c..449fc7ca2d 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -348,12 +348,12 @@ static inline uint64_t ppc64_phys_to_real(uint64_t addr)
static inline uint32_t rtas_ld(target_ulong phys, int n)
{
- return ldl_be_phys(ppc64_phys_to_real(phys + 4*n));
+ return ldl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4*n));
}
static inline void rtas_st(target_ulong phys, int n, uint32_t val)
{
- stl_be_phys(ppc64_phys_to_real(phys + 4*n), val);
+ stl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4*n), val);
}
typedef void (*spapr_rtas_fn)(PowerPCCPU *cpu, sPAPREnvironment *spapr,
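rtas_ld and rtas_st index 32-bit big-endian cells in guest memory, which is how RTAS argument and return buffers are laid out. A sketch of a handler epilogue (args_phys/rets_phys and the 0 status value are illustrative):

    uint32_t arg0 = rtas_ld(args_phys, 0);  /* first input cell */
    rtas_st(rets_phys, 0, 0);               /* 0 = success, by convention */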
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index fbd16a03e6..ded8e2302f 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -656,9 +656,15 @@ extern const VMStateInfo vmstate_info_bitmap;
#define VMSTATE_UINT32_ARRAY_V(_f, _s, _n, _v) \
VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_uint32, uint32_t)
+#define VMSTATE_UINT32_2DARRAY_V(_f, _s, _n1, _n2, _v) \
+ VMSTATE_2DARRAY(_f, _s, _n1, _n2, _v, vmstate_info_uint32, uint32_t)
+
#define VMSTATE_UINT32_ARRAY(_f, _s, _n) \
VMSTATE_UINT32_ARRAY_V(_f, _s, _n, 0)
+#define VMSTATE_UINT32_2DARRAY(_f, _s, _n1, _n2) \
+ VMSTATE_UINT32_2DARRAY_V(_f, _s, _n1, _n2, 0)
+
#define VMSTATE_UINT64_ARRAY_V(_f, _s, _n, _v) \
VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_uint64, uint64_t)
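The new 2D-array macros are what allow, e.g., the GIC's apr[GIC_NR_APRS][GIC_NCPU] state from earlier in this series to migrate as a single field. A sketch of such a vmstate entry:

    VMSTATE_UINT32_2DARRAY(apr, GICState, GIC_NR_APRS, GIC_NCPU),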
diff --git a/include/qemu/fifo8.h b/include/qemu/fifo8.h
index d318f71e11..8820780669 100644
--- a/include/qemu/fifo8.h
+++ b/include/qemu/fifo8.h
@@ -44,6 +44,19 @@ void fifo8_destroy(Fifo8 *fifo);
void fifo8_push(Fifo8 *fifo, uint8_t data);
/**
+ * fifo8_push_all:
+ * @fifo: FIFO to push to
+ * @data: data to push
+ * @size: number of bytes to push
+ *
+ * Push a byte array to the FIFO. Behaviour is undefined if there is not enough free space.
+ * Clients are responsible for checking the space left in the FIFO using
+ * fifo8_num_free().
+ */
+
+void fifo8_push_all(Fifo8 *fifo, const uint8_t *data, uint32_t num);
+
+/**
* fifo8_pop:
* @fifo: fifo to pop from
*
@@ -56,6 +69,32 @@ void fifo8_push(Fifo8 *fifo, uint8_t data);
uint8_t fifo8_pop(Fifo8 *fifo);
/**
+ * fifo8_pop_buf:
+ * @fifo: FIFO to pop from
+ * @max: maximum number of bytes to pop
+ * @num: actual number of returned bytes
+ *
+ * Pop a number of elements from the FIFO up to a maximum of max. The buffer
+ * containing the popped data is returned. This buffer points directly into
+ * the FIFO backing store and data is invalidated once any of the fifo8_* APIs
+ * are called on the FIFO.
+ *
+ * The function may return fewer bytes than requested when the data wraps
+ * around in the ring buffer; in this case only a contiguous part of the data
+ * is returned.
+ *
+ * The number of valid bytes returned is populated in *num; the function
+ * always returns at least 1 byte. max must not be 0 or greater than the
+ * number of bytes in the FIFO.
+ *
+ * Clients are responsible for checking the availability of requested data
+ * using fifo8_num_used().
+ *
+ * Returns: A pointer to popped data.
+ */
+const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num);
+
+/**
* fifo8_reset:
* @fifo: FIFO to reset
*
@@ -86,6 +125,28 @@ bool fifo8_is_empty(Fifo8 *fifo);
bool fifo8_is_full(Fifo8 *fifo);
+/**
+ * fifo8_num_free:
+ * @fifo: FIFO to check
+ *
+ * Return the number of free bytes in the FIFO.
+ *
+ * Returns: Number of free bytes.
+ */
+
+uint32_t fifo8_num_free(Fifo8 *fifo);
+
+/**
+ * fifo8_num_used:
+ * @fifo: FIFO to check
+ *
+ * Return the number of used bytes in the FIFO.
+ *
+ * Returns: Number of used bytes.
+ */
+
+uint32_t fifo8_num_used(Fifo8 *fifo);
+
extern const VMStateDescription vmstate_fifo8;
#define VMSTATE_FIFO8(_field, _state) { \
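Because fifo8_pop_buf can return short when the data wraps in the ring buffer, callers drain in a loop. A sketch (fifo8_drain is hypothetical; assumes fifo8_num_used(fifo) >= count and <string.h> for memcpy):

    static void fifo8_drain(Fifo8 *fifo, uint8_t *dst, uint32_t count)
    {
        while (count) {
            uint32_t n;
            const uint8_t *buf = fifo8_pop_buf(fifo, count, &n);
            memcpy(dst, buf, n);  /* copy before the next fifo8_* call */
            dst += n;
            count -= n;
        }
    }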
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 45244960b5..5b4e333fc1 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -26,6 +26,7 @@ typedef struct BusClass BusClass;
typedef struct AddressSpace AddressSpace;
typedef struct MemoryRegion MemoryRegion;
typedef struct MemoryRegionSection MemoryRegionSection;
+typedef struct MemoryListener MemoryListener;
typedef struct MemoryMappingList MemoryMappingList;
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 7739e00067..367eda17d1 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -186,6 +186,9 @@ struct CPUState {
uint32_t interrupt_request;
int singlestep_enabled;
+ AddressSpace *as;
+ MemoryListener *tcg_as_listener;
+
void *env_ptr; /* CPUArchState */
struct TranslationBlock *current_tb;
struct GDBRegisterState *gdb_regs;
diff --git a/monitor.c b/monitor.c
index b1ea262e8b..690c152d0b 100644
--- a/monitor.c
+++ b/monitor.c
@@ -1445,7 +1445,7 @@ static void do_sum(Monitor *mon, const QDict *qdict)
sum = 0;
for(addr = start; addr < (start + size); addr++) {
- uint8_t val = ldub_phys(addr);
+ uint8_t val = ldub_phys(&address_space_memory, addr);
/* BSD sum algorithm ('sum' Unix command) */
sum = (sum >> 1) | (sum << 15);
sum += val;
diff --git a/rules.mak b/rules.mak
index 49edb9bf07..391d6eb8e6 100644
--- a/rules.mak
+++ b/rules.mak
@@ -8,6 +8,7 @@ MAKEFLAGS += -rR
%.d:
%.h:
%.c:
+%.cc:
%.cpp:
%.m:
%.mak:
@@ -26,8 +27,12 @@ QEMU_INCLUDES += -I$(<D) -I$(@D)
%.o: %.rc
$(call quiet-command,$(WINDRES) -I. -o $@ $<," RC $(TARGET_DIR)$@")
+# If we have a CXX we might have some C++ objects, in which case we
+# must link with the C++ compiler, not the plain C compiler.
+LINKPROG = $(or $(CXX),$(CC))
+
ifeq ($(LIBTOOL),)
-LINK = $(call quiet-command,$(CC) $(QEMU_CFLAGS) $(CFLAGS) $(LDFLAGS) -o $@ \
+LINK = $(call quiet-command,$(LINKPROG) $(QEMU_CFLAGS) $(CFLAGS) $(LDFLAGS) -o $@ \
$(sort $(filter %.o, $1)) $(filter-out %.o, $1) $(version-obj-y) \
$(LIBS)," LINK $(TARGET_DIR)$@")
else
@@ -41,7 +46,7 @@ LIBTOOL += $(if $(V),,--quiet)
LINK = $(call quiet-command,\
$(if $(filter %.lo %.la,$^),$(LIBTOOL) --mode=link --tag=CC \
- )$(CC) $(QEMU_CFLAGS) $(CFLAGS) $(LDFLAGS) -o $@ \
+ )$(LINKPROG) $(QEMU_CFLAGS) $(CFLAGS) $(LDFLAGS) -o $@ \
$(sort $(filter %.o, $1)) $(filter-out %.o, $1) \
$(if $(filter %.lo %.la,$^),$(version-lobj-y),$(version-obj-y)) \
$(if $(filter %.lo %.la,$^),$(LIBTOOLFLAGS)) \
@@ -54,6 +59,9 @@ endif
%.o: %.asm
$(call quiet-command,$(AS) $(ASFLAGS) -o $@ $<," AS $(TARGET_DIR)$@")
+%.o: %.cc
+ $(call quiet-command,$(CXX) $(QEMU_INCLUDES) $(QEMU_CXXFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) -c -o $@ $<," CXX $(TARGET_DIR)$@")
+
%.o: %.cpp
$(call quiet-command,$(CXX) $(QEMU_INCLUDES) $(QEMU_CXXFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) -c -o $@ $<," CXX $(TARGET_DIR)$@")
@@ -77,7 +85,7 @@ quiet-command = $(if $(V),$1,$(if $(2),@echo $2 && $1, @$1))
cc-option = $(if $(shell $(CC) $1 $2 -S -o /dev/null -xc /dev/null \
>/dev/null 2>&1 && echo OK), $2, $3)
-VPATH_SUFFIXES = %.c %.h %.S %.cpp %.m %.mak %.texi %.sh %.rc
+VPATH_SUFFIXES = %.c %.h %.S %.cc %.cpp %.m %.mak %.texi %.sh %.rc
set-vpath = $(if $1,$(foreach PATTERN,$(VPATH_SUFFIXES),$(eval vpath $(PATTERN) $1)))
# find-in-path
diff --git a/target-alpha/helper.c b/target-alpha/helper.c
index fc61bb02f7..025fdaf4d1 100644
--- a/target-alpha/helper.c
+++ b/target-alpha/helper.c
@@ -213,6 +213,7 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,
int prot_need, int mmu_idx,
target_ulong *pphys, int *pprot)
{
+ CPUState *cs = ENV_GET_CPU(env);
target_long saddr = addr;
target_ulong phys = 0;
target_ulong L1pte, L2pte, L3pte;
@@ -251,7 +252,7 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,
/* L1 page table read. */
index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
- L1pte = ldq_phys(pt + index*8);
+ L1pte = ldq_phys(cs->as, pt + index*8);
if (unlikely((L1pte & PTE_VALID) == 0)) {
ret = MM_K_TNV;
@@ -264,7 +265,7 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,
/* L2 page table read. */
index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
- L2pte = ldq_phys(pt + index*8);
+ L2pte = ldq_phys(cs->as, pt + index*8);
if (unlikely((L2pte & PTE_VALID) == 0)) {
ret = MM_K_TNV;
@@ -277,7 +278,7 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,
/* L3 page table read. */
index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
- L3pte = ldq_phys(pt + index*8);
+ L3pte = ldq_phys(cs->as, pt + index*8);
phys = L3pte >> 32 << TARGET_PAGE_BITS;
if (unlikely((L3pte & PTE_VALID) == 0)) {
diff --git a/target-alpha/helper.h b/target-alpha/helper.h
index 5a0e78cefb..4f127c49c5 100644
--- a/target-alpha/helper.h
+++ b/target-alpha/helper.h
@@ -101,12 +101,12 @@ DEF_HELPER_FLAGS_2(ieee_input_cmp, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_2(hw_ret, void, env, i64)
DEF_HELPER_3(call_pal, void, env, i64, i64)
-DEF_HELPER_1(ldl_phys, i64, i64)
-DEF_HELPER_1(ldq_phys, i64, i64)
+DEF_HELPER_2(ldl_phys, i64, env, i64)
+DEF_HELPER_2(ldq_phys, i64, env, i64)
DEF_HELPER_2(ldl_l_phys, i64, env, i64)
DEF_HELPER_2(ldq_l_phys, i64, env, i64)
-DEF_HELPER_2(stl_phys, void, i64, i64)
-DEF_HELPER_2(stq_phys, void, i64, i64)
+DEF_HELPER_3(stl_phys, void, env, i64, i64)
+DEF_HELPER_3(stq_phys, void, env, i64, i64)
DEF_HELPER_3(stl_c_phys, i64, env, i64, i64)
DEF_HELPER_3(stq_c_phys, i64, env, i64, i64)
diff --git a/target-alpha/mem_helper.c b/target-alpha/mem_helper.c
index 7160a1cd4f..ea587043d4 100644
--- a/target-alpha/mem_helper.c
+++ b/target-alpha/mem_helper.c
@@ -24,46 +24,53 @@
/* Softmmu support */
#ifndef CONFIG_USER_ONLY
-uint64_t helper_ldl_phys(uint64_t p)
+uint64_t helper_ldl_phys(CPUAlphaState *env, uint64_t p)
{
- return (int32_t)ldl_phys(p);
+ CPUState *cs = ENV_GET_CPU(env);
+ return (int32_t)ldl_phys(cs->as, p);
}
-uint64_t helper_ldq_phys(uint64_t p)
+uint64_t helper_ldq_phys(CPUAlphaState *env, uint64_t p)
{
- return ldq_phys(p);
+ CPUState *cs = ENV_GET_CPU(env);
+ return ldq_phys(cs->as, p);
}
uint64_t helper_ldl_l_phys(CPUAlphaState *env, uint64_t p)
{
+ CPUState *cs = ENV_GET_CPU(env);
env->lock_addr = p;
- return env->lock_value = (int32_t)ldl_phys(p);
+ return env->lock_value = (int32_t)ldl_phys(cs->as, p);
}
uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
{
+ CPUState *cs = ENV_GET_CPU(env);
env->lock_addr = p;
- return env->lock_value = ldq_phys(p);
+ return env->lock_value = ldq_phys(cs->as, p);
}
-void helper_stl_phys(uint64_t p, uint64_t v)
+void helper_stl_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
- stl_phys(p, v);
+ CPUState *cs = ENV_GET_CPU(env);
+ stl_phys(cs->as, p, v);
}
-void helper_stq_phys(uint64_t p, uint64_t v)
+void helper_stq_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
- stq_phys(p, v);
+ CPUState *cs = ENV_GET_CPU(env);
+ stq_phys(cs->as, p, v);
}
uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint64_t ret = 0;
if (p == env->lock_addr) {
- int32_t old = ldl_phys(p);
+ int32_t old = ldl_phys(cs->as, p);
if (old == (int32_t)env->lock_value) {
- stl_phys(p, v);
+ stl_phys(cs->as, p, v);
ret = 1;
}
}
@@ -74,12 +81,13 @@ uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint64_t ret = 0;
if (p == env->lock_addr) {
- uint64_t old = ldq_phys(p);
+ uint64_t old = ldq_phys(cs->as, p);
if (old == env->lock_value) {
- stq_phys(p, v);
+ stq_phys(cs->as, p, v);
ret = 1;
}
}
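Taken together, the helpers above implement Alpha's load-locked/store-conditional protocol: the load records the address and value, and the store succeeds only if the address still matches and memory still holds the recorded value. Reduced to a sketch:

    /* helper_ldq_l_phys */
    env->lock_addr  = p;
    env->lock_value = ldq_phys(cs->as, p);
    /* ... guest code ... */
    /* helper_stq_c_phys: returns 1 on success, 0 on failure */
    if (p == env->lock_addr && ldq_phys(cs->as, p) == env->lock_value) {
        stq_phys(cs->as, p, v);
    }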
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 1155e86e29..4c94bed704 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -2912,11 +2912,11 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access (hw_ldl/p) */
- gen_helper_ldl_phys(cpu_ir[ra], addr);
+ gen_helper_ldl_phys(cpu_ir[ra], cpu_env, addr);
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
- gen_helper_ldq_phys(cpu_ir[ra], addr);
+ gen_helper_ldq_phys(cpu_ir[ra], cpu_env, addr);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
@@ -3225,11 +3225,11 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access */
- gen_helper_stl_phys(addr, val);
+ gen_helper_stl_phys(cpu_env, addr, val);
break;
case 0x1:
/* Quadword physical access */
- gen_helper_stq_phys(addr, val);
+ gen_helper_stq_phys(cpu_env, addr, val);
break;
case 0x2:
/* Longword physical access with lock */
diff --git a/target-arm/helper.c b/target-arm/helper.c
index ca5b0000ad..5ae08c9ad1 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -2449,14 +2449,16 @@ void switch_mode(CPUARMState *env, int mode)
static void v7m_push(CPUARMState *env, uint32_t val)
{
+ CPUState *cs = ENV_GET_CPU(env);
env->regs[13] -= 4;
- stl_phys(env->regs[13], val);
+ stl_phys(cs->as, env->regs[13], val);
}
static uint32_t v7m_pop(CPUARMState *env)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint32_t val;
- val = ldl_phys(env->regs[13]);
+ val = ldl_phys(cs->as, env->regs[13]);
env->regs[13] += 4;
return val;
}
@@ -2611,7 +2613,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
/* Clear IT bits */
env->condexec_bits = 0;
env->regs[14] = lr;
- addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
+ addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
env->regs[15] = addr & 0xfffffffe;
env->thumb = addr & 1;
}
@@ -2816,6 +2818,7 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
int is_user, hwaddr *phys_ptr,
int *prot, target_ulong *page_size)
{
+ CPUState *cs = ENV_GET_CPU(env);
int code;
uint32_t table;
uint32_t desc;
@@ -2828,7 +2831,7 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
/* Pagetable walk. */
/* Lookup l1 descriptor. */
table = get_level1_table_address(env, address);
- desc = ldl_phys(table);
+ desc = ldl_phys(cs->as, table);
type = (desc & 3);
domain = (desc >> 5) & 0x0f;
domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
@@ -2859,7 +2862,7 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
/* Fine pagetable. */
table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
}
- desc = ldl_phys(table);
+ desc = ldl_phys(cs->as, table);
switch (desc & 3) {
case 0: /* Page translation fault. */
code = 7;
@@ -2911,6 +2914,7 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
int is_user, hwaddr *phys_ptr,
int *prot, target_ulong *page_size)
{
+ CPUState *cs = ENV_GET_CPU(env);
int code;
uint32_t table;
uint32_t desc;
@@ -2925,7 +2929,7 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
/* Pagetable walk. */
/* Lookup l1 descriptor. */
table = get_level1_table_address(env, address);
- desc = ldl_phys(table);
+ desc = ldl_phys(cs->as, table);
type = (desc & 3);
if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
/* Section translation fault, or attempt to use the encoding
@@ -2967,7 +2971,7 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
}
/* Lookup l2 entry. */
table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
- desc = ldl_phys(table);
+ desc = ldl_phys(cs->as, table);
ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
switch (desc & 3) {
case 0: /* Page translation fault. */
@@ -3033,6 +3037,7 @@ static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size_ptr)
{
+ CPUState *cs = ENV_GET_CPU(env);
/* Read an LPAE long-descriptor translation table. */
MMUFaultType fault_type = translation_fault;
uint32_t level = 1;
@@ -3121,7 +3126,7 @@ static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
uint64_t descriptor;
descaddr |= ((address >> (9 * (4 - level))) & 0xff8);
- descriptor = ldq_phys(descaddr);
+ descriptor = ldq_phys(cs->as, descaddr);
if (!(descriptor & 1) ||
(!(descriptor & 2) && (level == 3))) {
/* Invalid, or the Reserved level 3 encoding */
diff --git a/target-arm/helper.h b/target-arm/helper.h
index 71b8411120..951e6ada07 100644
--- a/target-arm/helper.h
+++ b/target-arm/helper.h
@@ -320,6 +320,7 @@ DEF_HELPER_1(neon_cls_s8, i32, i32)
DEF_HELPER_1(neon_cls_s16, i32, i32)
DEF_HELPER_1(neon_cls_s32, i32, i32)
DEF_HELPER_1(neon_cnt_u8, i32, i32)
+DEF_HELPER_FLAGS_1(neon_rbit_u8, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32)
DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32)
diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c
index be6fbd997e..b4c86904f4 100644
--- a/target-arm/neon_helper.c
+++ b/target-arm/neon_helper.c
@@ -1133,6 +1133,18 @@ uint32_t HELPER(neon_cnt_u8)(uint32_t x)
return x;
}
+/* Reverse bits in each 8 bit word */
+uint32_t HELPER(neon_rbit_u8)(uint32_t x)
+{
+ x = ((x & 0xf0f0f0f0) >> 4)
+ | ((x & 0x0f0f0f0f) << 4);
+ x = ((x & 0x88888888) >> 3)
+ | ((x & 0x44444444) >> 1)
+ | ((x & 0x22222222) << 1)
+ | ((x & 0x11111111) << 3);
+ return x;
+}
+
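The masks implement a three-step reversal within each byte: swap nibbles, then move the remaining bits by 3 and 1 places. A worked example:

    /* 0x12 = 00010010 -> nibble swap -> 00100001
     *                 -> bit regroup -> 01001000 = 0x48,
     * so HELPER(neon_rbit_u8)(0x12121212) == 0x48484848.
     */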
#define NEON_QDMULH16(dest, src1, src2, round) do { \
uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 6c1ec1edc6..d60223af8e 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -74,6 +74,9 @@ typedef struct AArch64DecodeTable {
/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
+typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
+typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
+typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
/* initialize TCG globals. */
void a64_translate_init(void)
@@ -5500,7 +5503,119 @@ static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
*/
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ TCGv_ptr fpst;
+
+ /* For some ops (the FP ones), size[1] is part of the encoding.
+ * For ADDP strictly it is not but size[1] is always 1 for valid
+ * encodings.
+ */
+ opcode |= (extract32(size, 1, 1) << 5);
+
+ switch (opcode) {
+ case 0x3b: /* ADDP */
+ if (u || size != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ TCGV_UNUSED_PTR(fpst);
+ break;
+ case 0xc: /* FMAXNMP */
+ case 0xd: /* FADDP */
+ case 0xf: /* FMAXP */
+ case 0x2c: /* FMINNMP */
+ case 0x2f: /* FMINP */
+ /* FP op, size[0] is 32 or 64 bit */
+ if (!u) {
+ unallocated_encoding(s);
+ return;
+ }
+ size = extract32(size, 0, 1) ? 3 : 2;
+ fpst = get_fpstatus_ptr();
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (size == 3) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, 0, MO_64);
+ read_vec_element(s, tcg_op2, rn, 1, MO_64);
+
+ switch (opcode) {
+ case 0x3b: /* ADDP */
+ tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
+ break;
+ case 0xc: /* FMAXNMP */
+ gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xd: /* FADDP */
+ gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xf: /* FMAXP */
+ gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2c: /* FMINNMP */
+ gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2f: /* FMINP */
+ gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+ } else {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op1, rn, 0, MO_32);
+ read_vec_element_i32(s, tcg_op2, rn, 1, MO_32);
+
+ switch (opcode) {
+ case 0xc: /* FMAXNMP */
+ gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xd: /* FADDP */
+ gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xf: /* FMAXP */
+ gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2c: /* FMINNMP */
+ gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2f: /* FMINP */
+ gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_res);
+ }
+
+ if (!TCGV_IS_UNUSED_PTR(fpst)) {
+ tcg_temp_free_ptr(fpst);
+ }
}
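Scalar pairwise ops reduce the two elements of the source register to a single result element: ADDP computes Vn[0] + Vn[1], and the FP variants apply add/max/min the same way. A plain-C model of the size == 3 integer case:

    static uint64_t addp_scalar_d(const uint64_t vn[2])
    {
        return vn[0] + vn[1];  /* ADDP (scalar) */
    }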
/*
@@ -5738,6 +5853,20 @@ static void handle_3same_64(DisasContext *s, int opcode, bool u,
TCGCond cond;
switch (opcode) {
+ case 0x1: /* SQADD */
+ if (u) {
+ gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0x5: /* SQSUB */
+ if (u) {
+ gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
case 0x6: /* CMGT, CMHI */
/* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
* We implement this using setcond (test) and then negating.
@@ -5760,19 +5889,41 @@ static void handle_3same_64(DisasContext *s, int opcode, bool u,
tcg_gen_setcondi_i64(TCG_COND_NE, tcg_rd, tcg_rd, 0);
tcg_gen_neg_i64(tcg_rd, tcg_rd);
break;
- case 0x10: /* ADD, SUB */
+ case 0x8: /* SSHL, USHL */
if (u) {
- tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
+ gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
} else {
- tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
+ gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
}
break;
- case 0x1: /* SQADD */
- case 0x5: /* SQSUB */
- case 0x8: /* SSHL, USHL */
case 0x9: /* SQSHL, UQSHL */
+ if (u) {
+ gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
case 0xa: /* SRSHL, URSHL */
+ if (u) {
+ gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
+ }
+ break;
case 0xb: /* SQRSHL, UQRSHL */
+ if (u) {
+ gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0x10: /* ADD, SUB */
+ if (u) {
+ tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
+ }
+ break;
default:
g_assert_not_reached();
}
@@ -5917,8 +6068,6 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
int rm = extract32(insn, 16, 5);
int size = extract32(insn, 22, 2);
bool u = extract32(insn, 29, 1);
- TCGv_i64 tcg_rn;
- TCGv_i64 tcg_rm;
TCGv_i64 tcg_rd;
if (opcode >= 0x18) {
@@ -5949,10 +6098,11 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
switch (opcode) {
case 0x1: /* SQADD, UQADD */
case 0x5: /* SQSUB, UQSUB */
+ case 0x9: /* SQSHL, UQSHL */
+ case 0xb: /* SQRSHL, UQRSHL */
+ break;
case 0x8: /* SSHL, USHL */
case 0xa: /* SRSHL, URSHL */
- unsupported_encoding(s, insn);
- return;
case 0x6: /* CMGT, CMHI */
case 0x7: /* CMGE, CMHS */
case 0x11: /* CMTST, CMEQ */
@@ -5962,39 +6112,162 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
return;
}
break;
- case 0x9: /* SQSHL, UQSHL */
- case 0xb: /* SQRSHL, UQRSHL */
- unsupported_encoding(s, insn);
- return;
case 0x16: /* SQDMULH, SQRDMULH (vector) */
if (size != 1 && size != 2) {
unallocated_encoding(s);
return;
}
- unsupported_encoding(s, insn);
- return;
+ break;
default:
unallocated_encoding(s);
return;
}
- tcg_rn = read_fp_dreg(s, rn); /* op1 */
- tcg_rm = read_fp_dreg(s, rm); /* op2 */
tcg_rd = tcg_temp_new_i64();
- /* For the moment we only support the opcodes which are
- * 64-bit-width only. The size != 3 cases will
- * be handled later when the relevant ops are implemented.
- */
- handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
+ if (size == 3) {
+ TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
+ TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
+
+ handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rm);
+ } else {
+ /* Do a single operation on the lowest element in the vector.
+ * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
+ * no side effects for all these operations.
+ * OPTME: special-purpose helpers would avoid doing some
+ * unnecessary work in the helper for the 8 and 16 bit cases.
+ */
+ NeonGenTwoOpEnvFn *genenvfn;
+ TCGv_i32 tcg_rn = tcg_temp_new_i32();
+ TCGv_i32 tcg_rm = tcg_temp_new_i32();
+ TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_rn, rn, 0, size);
+ read_vec_element_i32(s, tcg_rm, rm, 0, size);
+
+ switch (opcode) {
+ case 0x1: /* SQADD, UQADD */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
+ { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
+ { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x5: /* SQSUB, UQSUB */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
+ { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
+ { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x9: /* SQSHL, UQSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
+ { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
+ { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0xb: /* SQRSHL, UQRSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
+ { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
+ { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x16: /* SQDMULH, SQRDMULH */
+ {
+ static NeonGenTwoOpEnvFn * const fns[2][2] = {
+ { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
+ { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
+ };
+ assert(size == 1 || size == 2);
+ genenvfn = fns[size - 1][u];
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
+ tcg_temp_free_i32(tcg_rd32);
+ tcg_temp_free_i32(tcg_rn);
+ tcg_temp_free_i32(tcg_rm);
+ }
write_fp_dreg(s, rd, tcg_rd);
- tcg_temp_free_i64(tcg_rn);
- tcg_temp_free_i64(tcg_rm);
tcg_temp_free_i64(tcg_rd);
}
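One detail of the table-dispatch pattern above: helpers that can saturate are NeonGenTwoOpEnvFn and take cpu_env so they can record saturation (the FPSCR QC bit), while the non-saturating ones are plain NeonGenTwoOpFn. The two call shapes differ by a single argument:

    genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);  /* saturating, may set QC */
    genfn(tcg_res, tcg_op1, tcg_op2);             /* plain two-operand op */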
+static void handle_2misc_64(DisasContext *s, int opcode, bool u,
+ TCGv_i64 tcg_rd, TCGv_i64 tcg_rn)
+{
+ /* Handle 64->64 opcodes which are shared between the scalar and
+ * vector 2-reg-misc groups. We cover every integer opcode where size == 3
+ * is valid in either group and also the double-precision fp ops.
+ */
+ TCGCond cond;
+
+ switch (opcode) {
+ case 0x5: /* NOT */
+ /* This opcode is shared with CNT and RBIT but we have earlier
+ * enforced that size == 3 if and only if this is the NOT insn.
+ */
+ tcg_gen_not_i64(tcg_rd, tcg_rn);
+ break;
+ case 0xa: /* CMLT */
+ /* 64 bit integer comparison against zero, result is
+ * test ? (2^64 - 1) : 0. We implement this using setcond(test)
+ * and then negating, as in handle_3same_64 above.
+ */
+ cond = TCG_COND_LT;
+ do_cmop:
+ tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
+ tcg_gen_neg_i64(tcg_rd, tcg_rd);
+ break;
+ case 0x8: /* CMGT, CMGE */
+ cond = u ? TCG_COND_GE : TCG_COND_GT;
+ goto do_cmop;
+ case 0x9: /* CMEQ, CMLE */
+ cond = u ? TCG_COND_LE : TCG_COND_EQ;
+ goto do_cmop;
+ case 0xb: /* ABS, NEG */
+ if (u) {
+ tcg_gen_neg_i64(tcg_rd, tcg_rn);
+ } else {
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+ tcg_gen_neg_i64(tcg_rd, tcg_rn);
+ tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
+ tcg_rn, tcg_rd);
+ tcg_temp_free_i64(tcg_zero);
+ }
+ break;
+ case 0x2f: /* FABS */
+ gen_helper_vfp_absd(tcg_rd, tcg_rn);
+ break;
+ case 0x6f: /* FNEG */
+ gen_helper_vfp_negd(tcg_rd, tcg_rn);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
/* C3.6.12 AdvSIMD scalar two reg misc
* 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
* +-----+---+-----------+------+-----------+--------+-----+------+------+
@@ -6003,7 +6276,50 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
*/
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 12, 5);
+ int size = extract32(insn, 22, 2);
+ bool u = extract32(insn, 29, 1);
+
+ switch (opcode) {
+ case 0xa: /* CMLT */
+ if (u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x8: /* CMGT, CMGE */
+ case 0x9: /* CMEQ, CMLE */
+ case 0xb: /* ABS, NEG */
+ if (size != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ /* Other categories of encoding in this class:
+ * + floating point (single and double)
+ * + SUQADD/USQADD/SQABS/SQNEG : size 8, 16, 32 or 64
+ * + SQXTN/SQXTN2/SQXTUN/SQXTUN2/UQXTN/UQXTN2:
+ * narrowing saturate ops: size 64/32/16 -> 32/16/8
+ */
+ unsupported_encoding(s, insn);
+ return;
+ }
+
+ if (size == 3) {
+ TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+
+ handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn);
+ write_fp_dreg(s, rd, tcg_rd);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ } else {
+ /* the 'size might not be 64' ops aren't implemented yet */
+ g_assert_not_reached();
+ }
}
/* C3.6.13 AdvSIMD scalar x indexed element
@@ -6519,10 +6835,153 @@ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
tcg_temp_free_i64(tcg_res[1]);
}
+/* Helper functions for 32 bit comparisons */
+static void gen_max_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
+{
+ tcg_gen_movcond_i32(TCG_COND_GE, res, op1, op2, op1, op2);
+}
+
+static void gen_max_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
+{
+ tcg_gen_movcond_i32(TCG_COND_GEU, res, op1, op2, op1, op2);
+}
+
+static void gen_min_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
+{
+ tcg_gen_movcond_i32(TCG_COND_LE, res, op1, op2, op1, op2);
+}
+
+static void gen_min_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
+{
+ tcg_gen_movcond_i32(TCG_COND_LEU, res, op1, op2, op1, op2);
+}
+
/* Pairwise op subgroup of C3.6.16. */
static void disas_simd_3same_pair(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int is_q = extract32(insn, 30, 1);
+ int u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 11, 5);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ int pass;
+
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0x14: /* SMAXP, UMAXP */
+ case 0x15: /* SMINP, UMINP */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x17: /* ADDP */
+ if (u) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* These operations work on the concatenated rm:rn, with each pair of
+ * adjacent elements being operated on to produce an element in the result.
+ */
+ if (size == 3) {
+ TCGv_i64 tcg_res[2];
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ int passreg = (pass == 0) ? rn : rm;
+
+ read_vec_element(s, tcg_op1, passreg, 0, MO_64);
+ read_vec_element(s, tcg_op2, passreg, 1, MO_64);
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ /* The only 64 bit pairwise integer op is ADDP */
+ assert(opcode == 0x17);
+ tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+ } else {
+ int maxpass = is_q ? 4 : 2;
+ TCGv_i32 tcg_res[4];
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ NeonGenTwoOpFn *genfn;
+ int passreg = pass < (maxpass / 2) ? rn : rm;
+ int passelt = (is_q && (pass & 1)) ? 2 : 0;
+
+ read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
+ read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
+ tcg_res[pass] = tcg_temp_new_i32();
+
+ switch (opcode) {
+ case 0x17: /* ADDP */
+ {
+ static NeonGenTwoOpFn * const fns[3] = {
+ gen_helper_neon_padd_u8,
+ gen_helper_neon_padd_u16,
+ tcg_gen_add_i32,
+ };
+ genfn = fns[size];
+ break;
+ }
+ case 0x14: /* SMAXP, UMAXP */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
+ { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
+ { gen_max_s32, gen_max_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x15: /* SMINP, UMINP */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
+ { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
+ { gen_min_s32, gen_min_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ genfn(tcg_res[pass], tcg_op1, tcg_op2);
+
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ }
+
+ for (pass = 0; pass < maxpass; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+ }
}
/* Floating point op subgroup of C3.6.16. */
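Vector pairwise ops treat rm:rn as one concatenated vector and combine adjacent element pairs, so the low half of the result comes from rn and the high half from rm. A plain-C model of the four-element (is_q) 32-bit ADDP case, matching the pass/passreg/passelt logic in disas_simd_3same_pair above:

    static void addp_4s(const uint32_t rn[4], const uint32_t rm[4],
                        uint32_t rd[4])
    {
        rd[0] = rn[0] + rn[1];
        rd[1] = rn[2] + rn[3];
        rd[2] = rm[0] + rm[1];
        rd[3] = rm[2] + rm[3];
    }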
@@ -6619,27 +7078,13 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
- unsupported_encoding(s, insn);
- return;
- case 0x1: /* SQADD */
- case 0x5: /* SQSUB */
- case 0x8: /* SSHL, USHL */
- case 0x9: /* SQSHL, UQSHL */
- case 0xa: /* SRSHL, URSHL */
- case 0xb: /* SQRSHL, UQRSHL */
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- unsupported_encoding(s, insn);
- return;
+ break;
case 0x16: /* SQDMULH, SQRDMULH */
if (size == 0 || size == 3) {
unallocated_encoding(s);
return;
}
- unsupported_encoding(s, insn);
- return;
+ break;
default:
if (size == 3 && !is_q) {
unallocated_encoding(s);
@@ -6670,12 +7115,63 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
TCGv_i32 tcg_op1 = tcg_temp_new_i32();
TCGv_i32 tcg_op2 = tcg_temp_new_i32();
TCGv_i32 tcg_res = tcg_temp_new_i32();
- NeonGenTwoOpFn *genfn;
+ NeonGenTwoOpFn *genfn = NULL;
+ NeonGenTwoOpEnvFn *genenvfn = NULL;
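+ /* Exactly one of genfn/genenvfn is set by the switch below; the
+ * _env variants are the saturating helpers, which take cpu_env so
+ * they can update the QC flag.
+ */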
read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
switch (opcode) {
+ case 0x0: /* SHADD, UHADD */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
+ { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
+ { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x1: /* SQADD, UQADD */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
+ { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
+ { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x2: /* SRHADD, URHADD */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
+ { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
+ { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x4: /* SHSUB, UHSUB */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
+ { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
+ { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x5: /* SQSUB, UQSUB */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
+ { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
+ { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
case 0x6: /* CMGT, CMHI */
{
static NeonGenTwoOpFn * const fns[3][2] = {
@@ -6696,6 +7192,78 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
genfn = fns[size][u];
break;
}
+ case 0x8: /* SSHL, USHL */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
+ { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
+ { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x9: /* SQSHL, UQSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
+ { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
+ { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0xa: /* SRSHL, URSHL */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
+ { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
+ { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0xb: /* SQRSHL, UQRSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
+ { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
+ { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0xc: /* SMAX, UMAX */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
+ { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
+ { gen_max_s32, gen_max_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0xd: /* SMIN, UMIN */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
+ { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
+ { gen_min_s32, gen_min_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0xe: /* SABD, UABD */
+ case 0xf: /* SABA, UABA */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
+ { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
+ { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
case 0x10: /* ADD, SUB */
{
static NeonGenTwoOpFn * const fns[3][2] = {
@@ -6716,11 +7284,57 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
genfn = fns[size][u];
break;
}
+ case 0x13: /* MUL, PMUL */
+ if (u) {
+ /* PMUL */
+ assert(size == 0);
+ genfn = gen_helper_neon_mul_p8;
+ break;
+ }
+ /* fall through: MUL */
+ case 0x12: /* MLA, MLS */
+ {
+ static NeonGenTwoOpFn * const fns[3] = {
+ gen_helper_neon_mul_u8,
+ gen_helper_neon_mul_u16,
+ tcg_gen_mul_i32,
+ };
+ genfn = fns[size];
+ break;
+ }
+ case 0x16: /* SQDMULH, SQRDMULH */
+ {
+ static NeonGenTwoOpEnvFn * const fns[2][2] = {
+ { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
+ { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
+ };
+ assert(size == 1 || size == 2);
+ genenvfn = fns[size - 1][u];
+ break;
+ }
default:
g_assert_not_reached();
}
- genfn(tcg_res, tcg_op1, tcg_op2);
+ if (genenvfn) {
+ genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
+ } else {
+ genfn(tcg_res, tcg_op1, tcg_op2);
+ }
+
+ if (opcode == 0xf || opcode == 0x12) {
+ /* SABA, UABA, MLA, MLS: accumulating ops */
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
+ { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
+ { tcg_gen_add_i32, tcg_gen_sub_i32 },
+ };
+ bool is_sub = (opcode == 0x12 && u); /* MLS */
+
+ genfn = fns[size][is_sub];
+ read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
+ genfn(tcg_res, tcg_op1, tcg_res);
+ }
write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
@@ -6765,6 +7379,148 @@ static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
}
}
+static void handle_2misc_narrow(DisasContext *s, int opcode, bool u, bool is_q,
+ int size, int rn, int rd)
+{
+ /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
+ * in the source becomes a size element in the destination).
+ */
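+ /* An illustrative example: XTN with size == 1 narrows four 32-bit
+ * source elements to four 16-bit results; each 64-bit pass packs two
+ * of them into one 32-bit value, written to the low half of Rd (or,
+ * for the XTN2 variant, the high half via destelt == 2).
+ */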
+ int pass;
+ TCGv_i32 tcg_res[2];
+ int destelt = is_q ? 2 : 0;
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ NeonGenNarrowFn *genfn = NULL;
+ NeonGenNarrowEnvFn *genenvfn = NULL;
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ tcg_res[pass] = tcg_temp_new_i32();
+
+ switch (opcode) {
+ case 0x12: /* XTN, SQXTUN */
+ {
+ static NeonGenNarrowFn * const xtnfns[3] = {
+ gen_helper_neon_narrow_u8,
+ gen_helper_neon_narrow_u16,
+ tcg_gen_trunc_i64_i32,
+ };
+ static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
+ gen_helper_neon_unarrow_sat8,
+ gen_helper_neon_unarrow_sat16,
+ gen_helper_neon_unarrow_sat32,
+ };
+ if (u) {
+ genenvfn = sqxtunfns[size];
+ } else {
+ genfn = xtnfns[size];
+ }
+ break;
+ }
+ case 0x14: /* SQXTN, UQXTN */
+ {
+ static NeonGenNarrowEnvFn * const fns[3][2] = {
+ { gen_helper_neon_narrow_sat_s8,
+ gen_helper_neon_narrow_sat_u8 },
+ { gen_helper_neon_narrow_sat_s16,
+ gen_helper_neon_narrow_sat_u16 },
+ { gen_helper_neon_narrow_sat_s32,
+ gen_helper_neon_narrow_sat_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ if (genfn) {
+ genfn(tcg_res[pass], tcg_op);
+ } else {
+ genenvfn(tcg_res[pass], cpu_env, tcg_op);
+ }
+
+ tcg_temp_free_i64(tcg_op);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+}
+
+static void handle_rev(DisasContext *s, int opcode, bool u,
+ bool is_q, int size, int rn, int rd)
+{
+ int op = (opcode << 1) | u;
+ int opsz = op + size;
+ int grp_size = 3 - opsz;
+ int dsize = is_q ? 128 : 64;
+ int i;
+
+ if (opsz >= 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
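+ /* A sketch of the grouping arithmetic: grp_size is log2 of the
+ * number of elements per reversal group. E.g. REV32 (op == 1) on
+ * 16-bit elements (size == 1) gives opsz == 2 and grp_size == 1, so
+ * revmask == 1 below and adjacent element pairs are swapped within
+ * each 32-bit group.
+ */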
+ if (size == 0) {
+ /* Special case bytes, use bswap op on each group of elements */
+ int groups = dsize / (8 << grp_size);
+
+ for (i = 0; i < groups; i++) {
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_tmp, rn, i, grp_size);
+ switch (grp_size) {
+ case MO_16:
+ tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
+ break;
+ case MO_32:
+ tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
+ break;
+ case MO_64:
+ tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ write_vec_element(s, tcg_tmp, rd, i, grp_size);
+ tcg_temp_free_i64(tcg_tmp);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+ } else {
+ int revmask = (1 << grp_size) - 1;
+ int esize = 8 << size;
+ int elements = dsize / esize;
+ TCGv_i64 tcg_rn = tcg_temp_new_i64();
+ TCGv_i64 tcg_rd = tcg_const_i64(0);
+ TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
+
+ for (i = 0; i < elements; i++) {
+ int e_rev = (i & 0xf) ^ revmask;
+ int off = e_rev * esize;
+ read_vec_element(s, tcg_rn, rn, i, size);
+ if (off >= 64) {
+ tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
+ tcg_rn, off - 64, esize);
+ } else {
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
+ }
+ }
+ write_vec_element(s, tcg_rd, rd, 0, MO_64);
+ write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
+
+ tcg_temp_free_i64(tcg_rd_hi);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ }
+}
+
/* C3.6.17 AdvSIMD two reg misc
* 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
* +---+---+---+-----------+------+-----------+--------+-----+------+------+
@@ -6773,7 +7529,280 @@ static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
*/
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ bool u = extract32(insn, 29, 1);
+ bool is_q = extract32(insn, 30, 1);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ switch (opcode) {
+ case 0x0: /* REV64, REV32 */
+ case 0x1: /* REV16 */
+ handle_rev(s, opcode, u, is_q, size, rn, rd);
+ return;
+ case 0x5: /* CNT, NOT, RBIT */
+ if (u && size == 0) {
+ /* NOT: adjust size so we can use the 64-bits-at-a-time loop. */
+ size = 3;
+ break;
+ } else if (u && size == 1) {
+ /* RBIT */
+ break;
+ } else if (!u && size == 0) {
+ /* CNT */
+ break;
+ }
+ unallocated_encoding(s);
+ return;
+ case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
+ case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_2misc_narrow(s, opcode, u, is_q, size, rn, rd);
+ return;
+ case 0x2: /* SADDLP, UADDLP */
+ case 0x4: /* CLS, CLZ */
+ case 0x6: /* SADALP, UADALP */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ unsupported_encoding(s, insn);
+ return;
+ case 0x13: /* SHLL, SHLL2 */
+ if (u == 0 || size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ unsupported_encoding(s, insn);
+ return;
+ case 0xa: /* CMLT */
+ if (u == 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x8: /* CMGT, CMGE */
+ case 0x9: /* CMEQ, CMLE */
+ case 0xb: /* ABS, NEG */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x3: /* SUQADD, USQADD */
+ case 0x7: /* SQABS, SQNEG */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ unsupported_encoding(s, insn);
+ return;
+ case 0xc ... 0xf:
+ case 0x16 ... 0x1d:
+ case 0x1f:
+ {
+ /* Floating point: U, size[1] and opcode indicate operation;
+ * size[0] indicates single or double precision.
+ */
+ opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
+ size = extract32(size, 0, 1) ? 3 : 2;
+ switch (opcode) {
+ case 0x2f: /* FABS */
+ case 0x6f: /* FNEG */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x16: /* FCVTN, FCVTN2 */
+ case 0x17: /* FCVTL, FCVTL2 */
+ case 0x18: /* FRINTN */
+ case 0x19: /* FRINTM */
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x1d: /* SCVTF */
+ case 0x2c: /* FCMGT (zero) */
+ case 0x2d: /* FCMEQ (zero) */
+ case 0x2e: /* FCMLT (zero) */
+ case 0x38: /* FRINTP */
+ case 0x39: /* FRINTZ */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ case 0x3c: /* URECPE */
+ case 0x3d: /* FRECPE */
+ case 0x56: /* FCVTXN, FCVTXN2 */
+ case 0x58: /* FRINTA */
+ case 0x59: /* FRINTX */
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x5d: /* UCVTF */
+ case 0x6c: /* FCMGE (zero) */
+ case 0x6d: /* FCMLE (zero) */
+ case 0x79: /* FRINTI */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ case 0x7c: /* URSQRTE */
+ case 0x7d: /* FRSQRTE */
+ case 0x7f: /* FSQRT */
+ unsupported_encoding(s, insn);
+ return;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ }
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (size == 3) {
+ /* All 64-bit element operations can be shared with scalar 2misc */
+ int pass;
+
+ for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+
+ handle_2misc_64(s, opcode, u, tcg_res, tcg_op);
+
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_op);
+ }
+ } else {
+ int pass;
+
+ for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ TCGCond cond;
+
+ read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
+
+ if (size == 2) {
+ /* Special cases for 32 bit elements */
+ switch (opcode) {
+ case 0xa: /* CMLT */
+ /* 32 bit integer comparison against zero, result is
+ * test ? (2^32 - 1) : 0. We implement via setcond(test)
+ * and then negating: setcond yields 0 or 1, and neg(1) is
+ * 0xffffffff, the required all-ones result.
+ */
+ cond = TCG_COND_LT;
+ do_cmop:
+ tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
+ tcg_gen_neg_i32(tcg_res, tcg_res);
+ break;
+ case 0x8: /* CMGT, CMGE */
+ cond = u ? TCG_COND_GE : TCG_COND_GT;
+ goto do_cmop;
+ case 0x9: /* CMEQ, CMLE */
+ cond = u ? TCG_COND_LE : TCG_COND_EQ;
+ goto do_cmop;
+ case 0xb: /* ABS, NEG */
+ if (u) {
+ tcg_gen_neg_i32(tcg_res, tcg_op);
+ } else {
+ TCGv_i32 tcg_zero = tcg_const_i32(0);
+ tcg_gen_neg_i32(tcg_res, tcg_op);
+ tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
+ tcg_zero, tcg_op, tcg_res);
+ tcg_temp_free_i32(tcg_zero);
+ }
+ break;
+ case 0x2f: /* FABS */
+ gen_helper_vfp_abss(tcg_res, tcg_op);
+ break;
+ case 0x6f: /* FNEG */
+ gen_helper_vfp_negs(tcg_res, tcg_op);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ /* Use helpers for 8 and 16 bit elements */
+ switch (opcode) {
+ case 0x5: /* CNT, RBIT */
+ /* For these two insns size is part of the opcode specifier
+ * (handled earlier); they always operate on byte elements.
+ */
+ if (u) {
+ gen_helper_neon_rbit_u8(tcg_res, tcg_op);
+ } else {
+ gen_helper_neon_cnt_u8(tcg_res, tcg_op);
+ }
+ break;
+ case 0x8: /* CMGT, CMGE */
+ case 0x9: /* CMEQ, CMLE */
+ case 0xa: /* CMLT */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
+ { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
+ { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
+ };
+ NeonGenTwoOpFn *genfn;
+ int comp;
+ bool reverse;
+ TCGv_i32 tcg_zero = tcg_const_i32(0);
+
+ /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
+ comp = (opcode - 0x8) * 2 + u;
+ /* ...but LE, LT are implemented as reverse GE, GT */
+ reverse = (comp > 2);
+ if (reverse) {
+ comp = 4 - comp;
+ }
+ genfn = fns[comp][size];
+ if (reverse) {
+ genfn(tcg_res, tcg_zero, tcg_op);
+ } else {
+ genfn(tcg_res, tcg_op, tcg_zero);
+ }
+ tcg_temp_free_i32(tcg_zero);
+ break;
+ }
+ case 0xb: /* ABS, NEG */
+ if (u) {
+ TCGv_i32 tcg_zero = tcg_const_i32(0);
+ if (size) {
+ gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
+ } else {
+ gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
+ }
+ tcg_temp_free_i32(tcg_zero);
+ } else {
+ if (size) {
+ gen_helper_neon_abs_s16(tcg_res, tcg_op);
+ } else {
+ gen_helper_neon_abs_s8(tcg_res, tcg_op);
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op);
+ }
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
}
/* C3.6.18 AdvSIMD vector x indexed element
@@ -7076,7 +8105,7 @@ done_generating:
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(env, pc_start, dc->pc - pc_start,
- dc->thumb | (dc->bswap_code << 1));
+ 4 | (dc->bswap_code << 1));
qemu_log("\n");
}
#endif
diff --git a/target-arm/translate.c b/target-arm/translate.c
index e701c0f9e1..782aab8b58 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -3142,16 +3142,19 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
VFP_DREG_N(rn, insn);
}
- if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
- /* Integer or single precision destination. */
+ if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
+ ((rn & 0x1e) == 0x6))) {
+ /* Integer or single/half precision destination. */
rd = VFP_SREG_D(insn);
} else {
VFP_DREG_D(rd, insn);
}
if (op == 15 &&
- (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
- /* VCVT from int is always from S reg regardless of dp bit.
- * VCVT with immediate frac_bits has same format as SREG_M
+ (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
+ ((rn & 0x1e) == 0x4))) {
+ /* VCVT from int or half precision is always from S reg
+ * regardless of dp bit. VCVT with immediate frac_bits
+ * has same format as SREG_M.
*/
rm = VFP_SREG_M(insn);
} else {
@@ -3241,12 +3244,19 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
case 5:
case 6:
case 7:
- /* VCVTB, VCVTT: only present with the halfprec extension,
- * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
+ /* VCVTB, VCVTT: only present with the halfprec extension;
+ * UNPREDICTABLE if bit 8 is set prior to ARMv8
+ * (we choose to UNDEF)
*/
- if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
+ if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
+ !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
return 1;
}
+ if (!extract32(rn, 1, 1)) {
+ /* Half precision source. */
+ gen_mov_F0_vreg(0, rm);
+ break;
+ }
/* Otherwise fall through */
default:
/* One source operand. */
@@ -3394,21 +3404,39 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
case 3: /* sqrt */
gen_vfp_sqrt(dp);
break;
- case 4: /* vcvtb.f32.f16 */
+ case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
tmp = gen_vfp_mrs();
tcg_gen_ext16u_i32(tmp, tmp);
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
+ if (dp) {
+ gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
+ cpu_env);
+ } else {
+ gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
+ cpu_env);
+ }
tcg_temp_free_i32(tmp);
break;
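+ /* (VCVTB converts the low 16 bits of the source S register,
+ * extracted by the ext16u above; VCVTT in the next case uses the
+ * high 16 bits via a 16-bit right shift.)
+ */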
- case 5: /* vcvtt.f32.f16 */
+ case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
tmp = gen_vfp_mrs();
tcg_gen_shri_i32(tmp, tmp, 16);
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
+ if (dp) {
+ gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
+ cpu_env);
+ } else {
+ gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
+ cpu_env);
+ }
tcg_temp_free_i32(tmp);
break;
- case 6: /* vcvtb.f16.f32 */
+ case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
tmp = tcg_temp_new_i32();
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
+ if (dp) {
+ gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
+ cpu_env);
+ } else {
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
+ cpu_env);
+ }
gen_mov_F0_vreg(0, rd);
tmp2 = gen_vfp_mrs();
tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
@@ -3416,9 +3444,15 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
tcg_temp_free_i32(tmp2);
gen_vfp_msr(tmp);
break;
- case 7: /* vcvtt.f16.f32 */
+ case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
tmp = tcg_temp_new_i32();
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
+ if (dp) {
+ gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
+ cpu_env);
+ } else {
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
+ cpu_env);
+ }
tcg_gen_shli_i32(tmp, tmp, 16);
gen_mov_F0_vreg(0, rd);
tmp2 = gen_vfp_mrs();
@@ -3551,16 +3585,21 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
}
/* Write back the result. */
- if (op == 15 && (rn >= 8 && rn <= 11))
- ; /* Comparison, do nothing. */
- else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
- /* VCVT double to int: always integer result. */
+ if (op == 15 && (rn >= 8 && rn <= 11)) {
+ /* Comparison, do nothing. */
+ } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
+ (rn & 0x1e) == 0x6)) {
+ /* VCVT double to int: always integer result.
+ * VCVT double to half precision always produces a
+ * single precision result.
+ */
gen_mov_vreg_F0(0, rd);
- else if (op == 15 && rn == 15)
+ } else if (op == 15 && rn == 15) {
/* conversion */
gen_mov_vreg_F0(!dp, rd);
- else
+ } else {
gen_mov_vreg_F0(dp, rd);
+ }
/* break out of the loop if we have finished */
if (veclen == 0)
diff --git a/target-i386/arch_memory_mapping.c b/target-i386/arch_memory_mapping.c
index 462f984a26..2d35f63e1e 100644
--- a/target-i386/arch_memory_mapping.c
+++ b/target-i386/arch_memory_mapping.c
@@ -16,7 +16,8 @@
#include "sysemu/memory_mapping.h"
/* PAE Paging or IA-32e Paging */
-static void walk_pte(MemoryMappingList *list, hwaddr pte_start_addr,
+static void walk_pte(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pte_start_addr,
int32_t a20_mask, target_ulong start_line_addr)
{
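+ /* The ld*_phys() accessors used below take an explicit AddressSpace
+ * argument; callers thread through the guest CPU's address space
+ * (cs->as) rather than relying on one global address space.
+ */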
hwaddr pte_addr, start_paddr;
@@ -26,7 +27,7 @@ static void walk_pte(MemoryMappingList *list, hwaddr pte_start_addr,
for (i = 0; i < 512; i++) {
pte_addr = (pte_start_addr + i * 8) & a20_mask;
- pte = ldq_phys(pte_addr);
+ pte = ldq_phys(as, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
/* not present */
continue;
@@ -45,7 +46,7 @@ static void walk_pte(MemoryMappingList *list, hwaddr pte_start_addr,
}
/* 32-bit Paging */
-static void walk_pte2(MemoryMappingList *list,
+static void walk_pte2(MemoryMappingList *list, AddressSpace *as,
hwaddr pte_start_addr, int32_t a20_mask,
target_ulong start_line_addr)
{
@@ -56,7 +57,7 @@ static void walk_pte2(MemoryMappingList *list,
for (i = 0; i < 1024; i++) {
pte_addr = (pte_start_addr + i * 4) & a20_mask;
- pte = ldl_phys(pte_addr);
+ pte = ldl_phys(as, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
/* not present */
continue;
@@ -77,7 +78,8 @@ static void walk_pte2(MemoryMappingList *list,
/* PAE Paging or IA-32e Paging */
#define PLM4_ADDR_MASK 0xffffffffff000ULL /* selects bits 51:12 */
-static void walk_pde(MemoryMappingList *list, hwaddr pde_start_addr,
+static void walk_pde(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pde_start_addr,
int32_t a20_mask, target_ulong start_line_addr)
{
hwaddr pde_addr, pte_start_addr, start_paddr;
@@ -87,7 +89,7 @@ static void walk_pde(MemoryMappingList *list, hwaddr pde_start_addr,
for (i = 0; i < 512; i++) {
pde_addr = (pde_start_addr + i * 8) & a20_mask;
- pde = ldq_phys(pde_addr);
+ pde = ldq_phys(as, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
/* not present */
continue;
@@ -108,12 +110,12 @@ static void walk_pde(MemoryMappingList *list, hwaddr pde_start_addr,
}
pte_start_addr = (pde & PLM4_ADDR_MASK) & a20_mask;
- walk_pte(list, pte_start_addr, a20_mask, line_addr);
+ walk_pte(list, as, pte_start_addr, a20_mask, line_addr);
}
}
/* 32-bit Paging */
-static void walk_pde2(MemoryMappingList *list,
+static void walk_pde2(MemoryMappingList *list, AddressSpace *as,
hwaddr pde_start_addr, int32_t a20_mask,
bool pse)
{
@@ -124,7 +126,7 @@ static void walk_pde2(MemoryMappingList *list,
for (i = 0; i < 1024; i++) {
pde_addr = (pde_start_addr + i * 4) & a20_mask;
- pde = ldl_phys(pde_addr);
+ pde = ldl_phys(as, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
/* not present */
continue;
@@ -150,12 +152,12 @@ static void walk_pde2(MemoryMappingList *list,
}
pte_start_addr = (pde & ~0xfff) & a20_mask;
- walk_pte2(list, pte_start_addr, a20_mask, line_addr);
+ walk_pte2(list, as, pte_start_addr, a20_mask, line_addr);
}
}
/* PAE Paging */
-static void walk_pdpe2(MemoryMappingList *list,
+static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as,
hwaddr pdpe_start_addr, int32_t a20_mask)
{
hwaddr pdpe_addr, pde_start_addr;
@@ -165,7 +167,7 @@ static void walk_pdpe2(MemoryMappingList *list,
for (i = 0; i < 4; i++) {
pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
- pdpe = ldq_phys(pdpe_addr);
+ pdpe = ldq_phys(as, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
/* not present */
continue;
@@ -173,13 +175,13 @@ static void walk_pdpe2(MemoryMappingList *list,
line_addr = (((unsigned int)i & 0x3) << 30);
pde_start_addr = (pdpe & ~0xfff) & a20_mask;
- walk_pde(list, pde_start_addr, a20_mask, line_addr);
+ walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
}
}
#ifdef TARGET_X86_64
/* IA-32e Paging */
-static void walk_pdpe(MemoryMappingList *list,
+static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
hwaddr pdpe_start_addr, int32_t a20_mask,
target_ulong start_line_addr)
{
@@ -190,7 +192,7 @@ static void walk_pdpe(MemoryMappingList *list,
for (i = 0; i < 512; i++) {
pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
- pdpe = ldq_phys(pdpe_addr);
+ pdpe = ldq_phys(as, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
/* not present */
continue;
@@ -211,12 +213,12 @@ static void walk_pdpe(MemoryMappingList *list,
}
pde_start_addr = (pdpe & PLM4_ADDR_MASK) & a20_mask;
- walk_pde(list, pde_start_addr, a20_mask, line_addr);
+ walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
}
}
/* IA-32e Paging */
-static void walk_pml4e(MemoryMappingList *list,
+static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,
hwaddr pml4e_start_addr, int32_t a20_mask)
{
hwaddr pml4e_addr, pdpe_start_addr;
@@ -226,7 +228,7 @@ static void walk_pml4e(MemoryMappingList *list,
for (i = 0; i < 512; i++) {
pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
- pml4e = ldq_phys(pml4e_addr);
+ pml4e = ldq_phys(as, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
/* not present */
continue;
@@ -234,7 +236,7 @@ static void walk_pml4e(MemoryMappingList *list,
line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48);
pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask;
- walk_pdpe(list, pdpe_start_addr, a20_mask, line_addr);
+ walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr);
}
}
#endif
@@ -256,14 +258,14 @@ void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
hwaddr pml4e_addr;
pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
- walk_pml4e(list, pml4e_addr, env->a20_mask);
+ walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask);
} else
#endif
{
hwaddr pdpe_addr;
pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
- walk_pdpe2(list, pdpe_addr, env->a20_mask);
+ walk_pdpe2(list, cs->as, pdpe_addr, env->a20_mask);
}
} else {
hwaddr pde_addr;
@@ -271,7 +273,7 @@ void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
pse = !!(env->cr[4] & CR4_PSE_MASK);
- walk_pde2(list, pde_addr, env->a20_mask, pse);
+ walk_pde2(list, cs->as, pde_addr, env->a20_mask, pse);
}
}
diff --git a/target-i386/helper.c b/target-i386/helper.c
index fe613b26e1..55c04577dc 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -515,6 +515,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write1, int mmu_idx)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint64_t ptep, pte;
target_ulong pde_addr, pte_addr;
int error_code, is_dirty, prot, page_size, is_write, is_user;
@@ -562,7 +563,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
env->a20_mask;
- pml4e = ldq_phys(pml4e_addr);
+ pml4e = ldq_phys(cs->as, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
@@ -573,12 +574,12 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
}
if (!(pml4e & PG_ACCESSED_MASK)) {
pml4e |= PG_ACCESSED_MASK;
- stl_phys_notdirty(pml4e_addr, pml4e);
+ stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
}
ptep = pml4e ^ PG_NX_MASK;
pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
env->a20_mask;
- pdpe = ldq_phys(pdpe_addr);
+ pdpe = ldq_phys(cs->as, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
@@ -590,7 +591,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
ptep &= pdpe ^ PG_NX_MASK;
if (!(pdpe & PG_ACCESSED_MASK)) {
pdpe |= PG_ACCESSED_MASK;
- stl_phys_notdirty(pdpe_addr, pdpe);
+ stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
}
} else
#endif
@@ -598,7 +599,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
/* XXX: load them when cr3 is loaded ? */
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
env->a20_mask;
- pdpe = ldq_phys(pdpe_addr);
+ pdpe = ldq_phys(cs->as, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
@@ -608,7 +609,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
env->a20_mask;
- pde = ldq_phys(pde_addr);
+ pde = ldq_phys(cs->as, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
@@ -660,7 +661,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
- stl_phys_notdirty(pde_addr, pde);
+ stl_phys_notdirty(cs->as, pde_addr, pde);
}
/* align to page_size */
pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
@@ -669,11 +670,11 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
/* 4 KB page */
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
- stl_phys_notdirty(pde_addr, pde);
+ stl_phys_notdirty(cs->as, pde_addr, pde);
}
pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
env->a20_mask;
- pte = ldq_phys(pte_addr);
+ pte = ldq_phys(cs->as, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
@@ -722,7 +723,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
- stl_phys_notdirty(pte_addr, pte);
+ stl_phys_notdirty(cs->as, pte_addr, pte);
}
page_size = 4096;
virt_addr = addr & ~0xfff;
@@ -734,7 +735,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
/* page directory entry */
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
env->a20_mask;
- pde = ldl_phys(pde_addr);
+ pde = ldl_phys(cs->as, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
@@ -777,7 +778,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
- stl_phys_notdirty(pde_addr, pde);
+ stl_phys_notdirty(cs->as, pde_addr, pde);
}
pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
@@ -786,13 +787,13 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
} else {
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
- stl_phys_notdirty(pde_addr, pde);
+ stl_phys_notdirty(cs->as, pde_addr, pde);
}
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
env->a20_mask;
- pte = ldl_phys(pte_addr);
+ pte = ldl_phys(cs->as, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
@@ -834,7 +835,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
- stl_phys_notdirty(pte_addr, pte);
+ stl_phys_notdirty(cs->as, pte_addr, pte);
}
page_size = 4096;
virt_addr = addr & ~0xfff;
@@ -880,7 +881,8 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
error_code |= PG_ERROR_I_D_MASK;
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
/* cr2 is not modified in case of exceptions */
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
addr);
} else {
env->cr[2] = addr;
@@ -919,13 +921,13 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
env->a20_mask;
- pml4e = ldq_phys(pml4e_addr);
+ pml4e = ldq_phys(cs->as, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK))
return -1;
pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
(((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
- pdpe = ldq_phys(pdpe_addr);
+ pdpe = ldq_phys(cs->as, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK))
return -1;
} else
@@ -933,14 +935,14 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
env->a20_mask;
- pdpe = ldq_phys(pdpe_addr);
+ pdpe = ldq_phys(cs->as, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK))
return -1;
}
pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
(((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
- pde = ldq_phys(pde_addr);
+ pde = ldq_phys(cs->as, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
return -1;
}
@@ -953,7 +955,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
(((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
page_size = 4096;
- pte = ldq_phys(pte_addr);
+ pte = ldq_phys(cs->as, pte_addr);
}
pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
if (!(pte & PG_PRESENT_MASK))
@@ -963,7 +965,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
/* page directory entry */
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
- pde = ldl_phys(pde_addr);
+ pde = ldl_phys(cs->as, pde_addr);
if (!(pde & PG_PRESENT_MASK))
return -1;
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
@@ -972,7 +974,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
} else {
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
- pte = ldl_phys(pte_addr);
+ pte = ldl_phys(cs->as, pte_addr);
if (!(pte & PG_PRESENT_MASK))
return -1;
page_size = 4096;
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index e78910200a..959212bfe3 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -1131,7 +1131,8 @@ static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
int error_code, int is_hw, int rm)
{
- uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ CPUState *cs = ENV_GET_CPU(env);
+ uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj));
if (!(event_inj & SVM_EVTINJ_VALID)) {
@@ -1145,11 +1146,12 @@ static void handle_even_inj(CPUX86State *env, int intno, int is_int,
event_inj = intno | type | SVM_EVTINJ_VALID;
if (!rm && exception_has_error_code(intno)) {
event_inj |= SVM_EVTINJ_VALID_ERR;
- stl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj_err),
error_code);
}
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
+ stl_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
event_inj);
}
}
@@ -1225,11 +1227,13 @@ static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
#if !defined(CONFIG_USER_ONLY)
if (env->hflags & HF_SVMI_MASK) {
- uint32_t event_inj = ldl_phys(env->vm_vmcb +
+ CPUState *cs = CPU(cpu);
+ uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.event_inj));
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
+ stl_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
event_inj & ~SVM_EVTINJ_VALID);
}
#endif
diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
index 6cb45511b8..71c64b2479 100644
--- a/target-i386/smm_helper.c
+++ b/target-i386/smm_helper.c
@@ -43,6 +43,7 @@ void helper_rsm(CPUX86State *env)
void do_smm_enter(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
target_ulong sm_state;
SegmentCache *dt;
int i, offset;
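+ /* The hex constants below are offsets into the SMRAM state save
+ * area, which sm_state points at (env->smbase + 0x8000). */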
@@ -59,83 +60,83 @@ void do_smm_enter(X86CPU *cpu)
for (i = 0; i < 6; i++) {
dt = &env->segs[i];
offset = 0x7e00 + i * 16;
- stw_phys(sm_state + offset, dt->selector);
- stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
- stl_phys(sm_state + offset + 4, dt->limit);
- stq_phys(sm_state + offset + 8, dt->base);
+ stw_phys(cs->as, sm_state + offset, dt->selector);
+ stw_phys(cs->as, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
+ stl_phys(cs->as, sm_state + offset + 4, dt->limit);
+ stq_phys(cs->as, sm_state + offset + 8, dt->base);
}
- stq_phys(sm_state + 0x7e68, env->gdt.base);
- stl_phys(sm_state + 0x7e64, env->gdt.limit);
+ stq_phys(cs->as, sm_state + 0x7e68, env->gdt.base);
+ stl_phys(cs->as, sm_state + 0x7e64, env->gdt.limit);
- stw_phys(sm_state + 0x7e70, env->ldt.selector);
- stq_phys(sm_state + 0x7e78, env->ldt.base);
- stl_phys(sm_state + 0x7e74, env->ldt.limit);
- stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
+ stw_phys(cs->as, sm_state + 0x7e70, env->ldt.selector);
+ stq_phys(cs->as, sm_state + 0x7e78, env->ldt.base);
+ stl_phys(cs->as, sm_state + 0x7e74, env->ldt.limit);
+ stw_phys(cs->as, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
- stq_phys(sm_state + 0x7e88, env->idt.base);
- stl_phys(sm_state + 0x7e84, env->idt.limit);
+ stq_phys(cs->as, sm_state + 0x7e88, env->idt.base);
+ stl_phys(cs->as, sm_state + 0x7e84, env->idt.limit);
- stw_phys(sm_state + 0x7e90, env->tr.selector);
- stq_phys(sm_state + 0x7e98, env->tr.base);
- stl_phys(sm_state + 0x7e94, env->tr.limit);
- stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
+ stw_phys(cs->as, sm_state + 0x7e90, env->tr.selector);
+ stq_phys(cs->as, sm_state + 0x7e98, env->tr.base);
+ stl_phys(cs->as, sm_state + 0x7e94, env->tr.limit);
+ stw_phys(cs->as, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
- stq_phys(sm_state + 0x7ed0, env->efer);
+ stq_phys(cs->as, sm_state + 0x7ed0, env->efer);
- stq_phys(sm_state + 0x7ff8, env->regs[R_EAX]);
- stq_phys(sm_state + 0x7ff0, env->regs[R_ECX]);
- stq_phys(sm_state + 0x7fe8, env->regs[R_EDX]);
- stq_phys(sm_state + 0x7fe0, env->regs[R_EBX]);
- stq_phys(sm_state + 0x7fd8, env->regs[R_ESP]);
- stq_phys(sm_state + 0x7fd0, env->regs[R_EBP]);
- stq_phys(sm_state + 0x7fc8, env->regs[R_ESI]);
- stq_phys(sm_state + 0x7fc0, env->regs[R_EDI]);
+ stq_phys(cs->as, sm_state + 0x7ff8, env->regs[R_EAX]);
+ stq_phys(cs->as, sm_state + 0x7ff0, env->regs[R_ECX]);
+ stq_phys(cs->as, sm_state + 0x7fe8, env->regs[R_EDX]);
+ stq_phys(cs->as, sm_state + 0x7fe0, env->regs[R_EBX]);
+ stq_phys(cs->as, sm_state + 0x7fd8, env->regs[R_ESP]);
+ stq_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EBP]);
+ stq_phys(cs->as, sm_state + 0x7fc8, env->regs[R_ESI]);
+ stq_phys(cs->as, sm_state + 0x7fc0, env->regs[R_EDI]);
for (i = 8; i < 16; i++) {
- stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
+ stq_phys(cs->as, sm_state + 0x7ff8 - i * 8, env->regs[i]);
}
- stq_phys(sm_state + 0x7f78, env->eip);
- stl_phys(sm_state + 0x7f70, cpu_compute_eflags(env));
- stl_phys(sm_state + 0x7f68, env->dr[6]);
- stl_phys(sm_state + 0x7f60, env->dr[7]);
+ stq_phys(cs->as, sm_state + 0x7f78, env->eip);
+ stl_phys(cs->as, sm_state + 0x7f70, cpu_compute_eflags(env));
+ stl_phys(cs->as, sm_state + 0x7f68, env->dr[6]);
+ stl_phys(cs->as, sm_state + 0x7f60, env->dr[7]);
- stl_phys(sm_state + 0x7f48, env->cr[4]);
- stl_phys(sm_state + 0x7f50, env->cr[3]);
- stl_phys(sm_state + 0x7f58, env->cr[0]);
+ stl_phys(cs->as, sm_state + 0x7f48, env->cr[4]);
+ stl_phys(cs->as, sm_state + 0x7f50, env->cr[3]);
+ stl_phys(cs->as, sm_state + 0x7f58, env->cr[0]);
- stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
- stl_phys(sm_state + 0x7f00, env->smbase);
+ stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID);
+ stl_phys(cs->as, sm_state + 0x7f00, env->smbase);
#else
- stl_phys(sm_state + 0x7ffc, env->cr[0]);
- stl_phys(sm_state + 0x7ff8, env->cr[3]);
- stl_phys(sm_state + 0x7ff4, cpu_compute_eflags(env));
- stl_phys(sm_state + 0x7ff0, env->eip);
- stl_phys(sm_state + 0x7fec, env->regs[R_EDI]);
- stl_phys(sm_state + 0x7fe8, env->regs[R_ESI]);
- stl_phys(sm_state + 0x7fe4, env->regs[R_EBP]);
- stl_phys(sm_state + 0x7fe0, env->regs[R_ESP]);
- stl_phys(sm_state + 0x7fdc, env->regs[R_EBX]);
- stl_phys(sm_state + 0x7fd8, env->regs[R_EDX]);
- stl_phys(sm_state + 0x7fd4, env->regs[R_ECX]);
- stl_phys(sm_state + 0x7fd0, env->regs[R_EAX]);
- stl_phys(sm_state + 0x7fcc, env->dr[6]);
- stl_phys(sm_state + 0x7fc8, env->dr[7]);
-
- stl_phys(sm_state + 0x7fc4, env->tr.selector);
- stl_phys(sm_state + 0x7f64, env->tr.base);
- stl_phys(sm_state + 0x7f60, env->tr.limit);
- stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
-
- stl_phys(sm_state + 0x7fc0, env->ldt.selector);
- stl_phys(sm_state + 0x7f80, env->ldt.base);
- stl_phys(sm_state + 0x7f7c, env->ldt.limit);
- stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
-
- stl_phys(sm_state + 0x7f74, env->gdt.base);
- stl_phys(sm_state + 0x7f70, env->gdt.limit);
-
- stl_phys(sm_state + 0x7f58, env->idt.base);
- stl_phys(sm_state + 0x7f54, env->idt.limit);
+ stl_phys(cs->as, sm_state + 0x7ffc, env->cr[0]);
+ stl_phys(cs->as, sm_state + 0x7ff8, env->cr[3]);
+ stl_phys(cs->as, sm_state + 0x7ff4, cpu_compute_eflags(env));
+ stl_phys(cs->as, sm_state + 0x7ff0, env->eip);
+ stl_phys(cs->as, sm_state + 0x7fec, env->regs[R_EDI]);
+ stl_phys(cs->as, sm_state + 0x7fe8, env->regs[R_ESI]);
+ stl_phys(cs->as, sm_state + 0x7fe4, env->regs[R_EBP]);
+ stl_phys(cs->as, sm_state + 0x7fe0, env->regs[R_ESP]);
+ stl_phys(cs->as, sm_state + 0x7fdc, env->regs[R_EBX]);
+ stl_phys(cs->as, sm_state + 0x7fd8, env->regs[R_EDX]);
+ stl_phys(cs->as, sm_state + 0x7fd4, env->regs[R_ECX]);
+ stl_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EAX]);
+ stl_phys(cs->as, sm_state + 0x7fcc, env->dr[6]);
+ stl_phys(cs->as, sm_state + 0x7fc8, env->dr[7]);
+
+ stl_phys(cs->as, sm_state + 0x7fc4, env->tr.selector);
+ stl_phys(cs->as, sm_state + 0x7f64, env->tr.base);
+ stl_phys(cs->as, sm_state + 0x7f60, env->tr.limit);
+ stl_phys(cs->as, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
+
+ stl_phys(cs->as, sm_state + 0x7fc0, env->ldt.selector);
+ stl_phys(cs->as, sm_state + 0x7f80, env->ldt.base);
+ stl_phys(cs->as, sm_state + 0x7f7c, env->ldt.limit);
+ stl_phys(cs->as, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
+
+ stl_phys(cs->as, sm_state + 0x7f74, env->gdt.base);
+ stl_phys(cs->as, sm_state + 0x7f70, env->gdt.limit);
+
+ stl_phys(cs->as, sm_state + 0x7f58, env->idt.base);
+ stl_phys(cs->as, sm_state + 0x7f54, env->idt.limit);
for (i = 0; i < 6; i++) {
dt = &env->segs[i];
@@ -144,15 +145,15 @@ void do_smm_enter(X86CPU *cpu)
} else {
offset = 0x7f2c + (i - 3) * 12;
}
- stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
- stl_phys(sm_state + offset + 8, dt->base);
- stl_phys(sm_state + offset + 4, dt->limit);
- stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
+ stl_phys(cs->as, sm_state + 0x7fa8 + i * 4, dt->selector);
+ stl_phys(cs->as, sm_state + offset + 8, dt->base);
+ stl_phys(cs->as, sm_state + offset + 4, dt->limit);
+ stl_phys(cs->as, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
}
- stl_phys(sm_state + 0x7f14, env->cr[4]);
+ stl_phys(cs->as, sm_state + 0x7f14, env->cr[4]);
- stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
- stl_phys(sm_state + 0x7ef8, env->smbase);
+ stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID);
+ stl_phys(cs->as, sm_state + 0x7ef8, env->smbase);
#endif
/* init SMM cpu state */
@@ -180,6 +181,7 @@ void do_smm_enter(X86CPU *cpu)
void helper_rsm(CPUX86State *env)
{
+ CPUState *cs = ENV_GET_CPU(env);
X86CPU *cpu = x86_env_get_cpu(env);
target_ulong sm_state;
int i, offset;
@@ -187,91 +189,91 @@ void helper_rsm(CPUX86State *env)
sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
- cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
+ cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0));
for (i = 0; i < 6; i++) {
offset = 0x7e00 + i * 16;
cpu_x86_load_seg_cache(env, i,
- lduw_phys(sm_state + offset),
- ldq_phys(sm_state + offset + 8),
- ldl_phys(sm_state + offset + 4),
- (lduw_phys(sm_state + offset + 2) &
+ lduw_phys(cs->as, sm_state + offset),
+ ldq_phys(cs->as, sm_state + offset + 8),
+ ldl_phys(cs->as, sm_state + offset + 4),
+ (lduw_phys(cs->as, sm_state + offset + 2) &
0xf0ff) << 8);
}
- env->gdt.base = ldq_phys(sm_state + 0x7e68);
- env->gdt.limit = ldl_phys(sm_state + 0x7e64);
-
- env->ldt.selector = lduw_phys(sm_state + 0x7e70);
- env->ldt.base = ldq_phys(sm_state + 0x7e78);
- env->ldt.limit = ldl_phys(sm_state + 0x7e74);
- env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
-
- env->idt.base = ldq_phys(sm_state + 0x7e88);
- env->idt.limit = ldl_phys(sm_state + 0x7e84);
-
- env->tr.selector = lduw_phys(sm_state + 0x7e90);
- env->tr.base = ldq_phys(sm_state + 0x7e98);
- env->tr.limit = ldl_phys(sm_state + 0x7e94);
- env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
-
- env->regs[R_EAX] = ldq_phys(sm_state + 0x7ff8);
- env->regs[R_ECX] = ldq_phys(sm_state + 0x7ff0);
- env->regs[R_EDX] = ldq_phys(sm_state + 0x7fe8);
- env->regs[R_EBX] = ldq_phys(sm_state + 0x7fe0);
- env->regs[R_ESP] = ldq_phys(sm_state + 0x7fd8);
- env->regs[R_EBP] = ldq_phys(sm_state + 0x7fd0);
- env->regs[R_ESI] = ldq_phys(sm_state + 0x7fc8);
- env->regs[R_EDI] = ldq_phys(sm_state + 0x7fc0);
+ env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68);
+ env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64);
+
+ env->ldt.selector = lduw_phys(cs->as, sm_state + 0x7e70);
+ env->ldt.base = ldq_phys(cs->as, sm_state + 0x7e78);
+ env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7e74);
+ env->ldt.flags = (lduw_phys(cs->as, sm_state + 0x7e72) & 0xf0ff) << 8;
+
+ env->idt.base = ldq_phys(cs->as, sm_state + 0x7e88);
+ env->idt.limit = ldl_phys(cs->as, sm_state + 0x7e84);
+
+ env->tr.selector = lduw_phys(cs->as, sm_state + 0x7e90);
+ env->tr.base = ldq_phys(cs->as, sm_state + 0x7e98);
+ env->tr.limit = ldl_phys(cs->as, sm_state + 0x7e94);
+ env->tr.flags = (lduw_phys(cs->as, sm_state + 0x7e92) & 0xf0ff) << 8;
+
+ env->regs[R_EAX] = ldq_phys(cs->as, sm_state + 0x7ff8);
+ env->regs[R_ECX] = ldq_phys(cs->as, sm_state + 0x7ff0);
+ env->regs[R_EDX] = ldq_phys(cs->as, sm_state + 0x7fe8);
+ env->regs[R_EBX] = ldq_phys(cs->as, sm_state + 0x7fe0);
+ env->regs[R_ESP] = ldq_phys(cs->as, sm_state + 0x7fd8);
+ env->regs[R_EBP] = ldq_phys(cs->as, sm_state + 0x7fd0);
+ env->regs[R_ESI] = ldq_phys(cs->as, sm_state + 0x7fc8);
+ env->regs[R_EDI] = ldq_phys(cs->as, sm_state + 0x7fc0);
for (i = 8; i < 16; i++) {
- env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
+ env->regs[i] = ldq_phys(cs->as, sm_state + 0x7ff8 - i * 8);
}
- env->eip = ldq_phys(sm_state + 0x7f78);
- cpu_load_eflags(env, ldl_phys(sm_state + 0x7f70),
+ env->eip = ldq_phys(cs->as, sm_state + 0x7f78);
+ cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7f70),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
- env->dr[6] = ldl_phys(sm_state + 0x7f68);
- env->dr[7] = ldl_phys(sm_state + 0x7f60);
+ env->dr[6] = ldl_phys(cs->as, sm_state + 0x7f68);
+ env->dr[7] = ldl_phys(cs->as, sm_state + 0x7f60);
- cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
- cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
- cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
+ cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f48));
+ cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7f50));
+ cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7f58));
- val = ldl_phys(sm_state + 0x7efc); /* revision ID */
+ val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
if (val & 0x20000) {
- env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
+ env->smbase = ldl_phys(cs->as, sm_state + 0x7f00) & ~0x7fff;
}
#else
- cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
- cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
- cpu_load_eflags(env, ldl_phys(sm_state + 0x7ff4),
+ cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7ffc));
+ cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7ff8));
+ cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7ff4),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
- env->eip = ldl_phys(sm_state + 0x7ff0);
- env->regs[R_EDI] = ldl_phys(sm_state + 0x7fec);
- env->regs[R_ESI] = ldl_phys(sm_state + 0x7fe8);
- env->regs[R_EBP] = ldl_phys(sm_state + 0x7fe4);
- env->regs[R_ESP] = ldl_phys(sm_state + 0x7fe0);
- env->regs[R_EBX] = ldl_phys(sm_state + 0x7fdc);
- env->regs[R_EDX] = ldl_phys(sm_state + 0x7fd8);
- env->regs[R_ECX] = ldl_phys(sm_state + 0x7fd4);
- env->regs[R_EAX] = ldl_phys(sm_state + 0x7fd0);
- env->dr[6] = ldl_phys(sm_state + 0x7fcc);
- env->dr[7] = ldl_phys(sm_state + 0x7fc8);
-
- env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
- env->tr.base = ldl_phys(sm_state + 0x7f64);
- env->tr.limit = ldl_phys(sm_state + 0x7f60);
- env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
-
- env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
- env->ldt.base = ldl_phys(sm_state + 0x7f80);
- env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
- env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
-
- env->gdt.base = ldl_phys(sm_state + 0x7f74);
- env->gdt.limit = ldl_phys(sm_state + 0x7f70);
-
- env->idt.base = ldl_phys(sm_state + 0x7f58);
- env->idt.limit = ldl_phys(sm_state + 0x7f54);
+ env->eip = ldl_phys(cs->as, sm_state + 0x7ff0);
+ env->regs[R_EDI] = ldl_phys(cs->as, sm_state + 0x7fec);
+ env->regs[R_ESI] = ldl_phys(cs->as, sm_state + 0x7fe8);
+ env->regs[R_EBP] = ldl_phys(cs->as, sm_state + 0x7fe4);
+ env->regs[R_ESP] = ldl_phys(cs->as, sm_state + 0x7fe0);
+ env->regs[R_EBX] = ldl_phys(cs->as, sm_state + 0x7fdc);
+ env->regs[R_EDX] = ldl_phys(cs->as, sm_state + 0x7fd8);
+ env->regs[R_ECX] = ldl_phys(cs->as, sm_state + 0x7fd4);
+ env->regs[R_EAX] = ldl_phys(cs->as, sm_state + 0x7fd0);
+ env->dr[6] = ldl_phys(cs->as, sm_state + 0x7fcc);
+ env->dr[7] = ldl_phys(cs->as, sm_state + 0x7fc8);
+
+ env->tr.selector = ldl_phys(cs->as, sm_state + 0x7fc4) & 0xffff;
+ env->tr.base = ldl_phys(cs->as, sm_state + 0x7f64);
+ env->tr.limit = ldl_phys(cs->as, sm_state + 0x7f60);
+ env->tr.flags = (ldl_phys(cs->as, sm_state + 0x7f5c) & 0xf0ff) << 8;
+
+ env->ldt.selector = ldl_phys(cs->as, sm_state + 0x7fc0) & 0xffff;
+ env->ldt.base = ldl_phys(cs->as, sm_state + 0x7f80);
+ env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7f7c);
+ env->ldt.flags = (ldl_phys(cs->as, sm_state + 0x7f78) & 0xf0ff) << 8;
+
+ env->gdt.base = ldl_phys(cs->as, sm_state + 0x7f74);
+ env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7f70);
+
+ env->idt.base = ldl_phys(cs->as, sm_state + 0x7f58);
+ env->idt.limit = ldl_phys(cs->as, sm_state + 0x7f54);
for (i = 0; i < 6; i++) {
if (i < 3) {
@@ -280,16 +282,18 @@ void helper_rsm(CPUX86State *env)
offset = 0x7f2c + (i - 3) * 12;
}
cpu_x86_load_seg_cache(env, i,
- ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
- ldl_phys(sm_state + offset + 8),
- ldl_phys(sm_state + offset + 4),
- (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
+ ldl_phys(cs->as,
+ sm_state + 0x7fa8 + i * 4) & 0xffff,
+ ldl_phys(cs->as, sm_state + offset + 8),
+ ldl_phys(cs->as, sm_state + offset + 4),
+ (ldl_phys(cs->as,
+ sm_state + offset) & 0xf0ff) << 8);
}
- cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
+ cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f14));
- val = ldl_phys(sm_state + 0x7efc); /* revision ID */
+ val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
if (val & 0x20000) {
- env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
+ env->smbase = ldl_phys(cs->as, sm_state + 0x7ef8) & ~0x7fff;
}
#endif
CC_OP = CC_OP_EFLAGS;
diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c
index 4a7de42b35..b38d45002f 100644
--- a/target-i386/svm_helper.c
+++ b/target-i386/svm_helper.c
@@ -88,25 +88,28 @@ void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
const SegmentCache *sc)
{
- stw_phys(addr + offsetof(struct vmcb_seg, selector),
+ CPUState *cs = ENV_GET_CPU(env);
+ stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
sc->selector);
- stq_phys(addr + offsetof(struct vmcb_seg, base),
+ stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
sc->base);
- stl_phys(addr + offsetof(struct vmcb_seg, limit),
+ stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
sc->limit);
- stw_phys(addr + offsetof(struct vmcb_seg, attrib),
+ stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
SegmentCache *sc)
{
+ CPUState *cs = ENV_GET_CPU(env);
unsigned int flags;
- sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
- sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
- sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
- flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
+ sc->selector = lduw_phys(cs->as,
+ addr + offsetof(struct vmcb_seg, selector));
+ sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
+ sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
+ flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib));
sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
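+ /* (vmcb_seg.attrib packs the attribute bits into a 12-bit field:
+ * attrib bits 0-7 map to SegmentCache flags bits 8-15 and attrib
+ * bits 8-11 to flags bits 20-23; the shifts here and in
+ * svm_save_seg() convert between the two layouts.)
+ */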
}
@@ -122,6 +125,7 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
+ CPUState *cs = ENV_GET_CPU(env);
target_ulong addr;
uint32_t event_inj;
uint32_t int_ctl;
@@ -139,25 +143,33 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
env->vm_vmcb = addr;
/* save the current CPU state in the hsave page */
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
+ stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
env->gdt.base);
- stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
+ stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
env->gdt.limit);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
+ stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
env->idt.base);
- stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
+ stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
env->idt.limit);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
-
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
+ stq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
+ stq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
+ stq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
+ stq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
+ stq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
+ stq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
+
+ stq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
+ stq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.rflags),
cpu_compute_eflags(env));
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
@@ -169,28 +181,30 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
&env->segs[R_DS]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
+ stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
env->eip + next_eip_addend);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
+ stq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
+ stq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
/* load the interception bitmaps so we do not need to access the
vmcb in svm mode */
- env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
control.intercept));
- env->intercept_cr_read = lduw_phys(env->vm_vmcb +
+ env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_cr_read));
- env->intercept_cr_write = lduw_phys(env->vm_vmcb +
+ env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_cr_write));
- env->intercept_dr_read = lduw_phys(env->vm_vmcb +
+ env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_dr_read));
- env->intercept_dr_write = lduw_phys(env->vm_vmcb +
+ env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_dr_write));
- env->intercept_exceptions = ldl_phys(env->vm_vmcb +
+ env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_exceptions
));
@@ -198,30 +212,36 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
/* enable intercepts */
env->hflags |= HF_SVMI_MASK;
- env->tsc_offset = ldq_phys(env->vm_vmcb +
+ env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb, control.tsc_offset));
- env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
save.gdtr.base));
- env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
save.gdtr.limit));
- env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
save.idtr.base));
- env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
save.idtr.limit));
/* clear exit_info_2 so we behave like the real hardware */
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
- cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ cpu_x86_update_cr0(env, ldq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb,
save.cr0)));
- cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ cpu_x86_update_cr4(env, ldq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb,
save.cr4)));
- cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ cpu_x86_update_cr3(env, ldq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb,
save.cr3)));
- env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
- int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+ env->cr[2] = ldq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr2));
+ int_ctl = ldl_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
if (int_ctl & V_INTR_MASKING_MASK) {
env->v_tpr = int_ctl & V_TPR_MASK;
@@ -232,9 +252,11 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
}
cpu_load_efer(env,
- ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
+ ldq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.efer)));
env->eflags = 0;
- cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ cpu_load_eflags(env, ldq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb,
save.rflags)),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
CC_OP = CC_OP_EFLAGS;
@@ -248,18 +270,25 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
R_DS);
- env->eip = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
-
- env->regs[R_ESP] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
- env->regs[R_EAX] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
- env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
- env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
- cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
+ env->eip = ldq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.rip));
+
+ env->regs[R_ESP] = ldq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.rsp));
+ env->regs[R_EAX] = ldq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.rax));
+ env->dr[7] = ldq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr7));
+ env->dr[6] = ldq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr6));
+ cpu_x86_set_cpl(env, ldub_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb,
save.cpl)));
/* FIXME: guest state consistency checks */
- switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
+ switch (ldub_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
case TLB_CONTROL_DO_NOTHING:
break;
case TLB_CONTROL_FLUSH_ALL_ASID:
@@ -277,12 +306,12 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
}
/* maybe we need to inject an event */
- event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj));
if (event_inj & SVM_EVTINJ_VALID) {
uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
- uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
+ uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.event_inj_err));
@@ -336,6 +365,7 @@ void helper_vmmcall(CPUX86State *env)
void helper_vmload(CPUX86State *env, int aflag)
{
+ CPUState *cs = ENV_GET_CPU(env);
target_ulong addr;
cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);
@@ -348,7 +378,7 @@ void helper_vmload(CPUX86State *env, int aflag)
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
- addr, ldq_phys(addr + offsetof(struct vmcb,
+ addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
save.fs.base)),
env->segs[R_FS].base);
@@ -358,22 +388,24 @@ void helper_vmload(CPUX86State *env, int aflag)
svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
#ifdef TARGET_X86_64
- env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
+ env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
save.kernel_gs_base));
- env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
- env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
- env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
+ env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
+ env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
+ env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
#endif
- env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
- env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
- env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
+ env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
+ env->sysenter_cs = ldq_phys(cs->as,
+ addr + offsetof(struct vmcb, save.sysenter_cs));
+ env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
save.sysenter_esp));
- env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
+ env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
save.sysenter_eip));
}
void helper_vmsave(CPUX86State *env, int aflag)
{
+ CPUState *cs = ENV_GET_CPU(env);
target_ulong addr;
cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);
@@ -386,7 +418,8 @@ void helper_vmsave(CPUX86State *env, int aflag)
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
- addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
+ addr, ldq_phys(cs->as,
+ addr + offsetof(struct vmcb, save.fs.base)),
env->segs[R_FS].base);
svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
@@ -399,17 +432,18 @@ void helper_vmsave(CPUX86State *env, int aflag)
&env->ldt);
#ifdef TARGET_X86_64
- stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
+ stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
env->kernelgsbase);
- stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
- stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
- stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
+ stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
+ stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
+ stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
- stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
- stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
- stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
+ stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
+ stq_phys(cs->as,
+ addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
+ stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
env->sysenter_esp);
- stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
+ stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
env->sysenter_eip);
}
@@ -452,6 +486,8 @@ void helper_invlpga(CPUX86State *env, int aflag)
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
uint64_t param)
{
+ CPUState *cs = ENV_GET_CPU(env);
+
if (likely(!(env->hflags & HF_SVMI_MASK))) {
return;
}
@@ -484,7 +520,7 @@ void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
case SVM_EXIT_MSR:
if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
/* FIXME: this should be read in at vmrun (faster this way?) */
- uint64_t addr = ldq_phys(env->vm_vmcb +
+ uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.msrpm_base_pa));
uint32_t t0, t1;
@@ -510,7 +546,7 @@ void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
t1 = 0;
break;
}
- if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
+ if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
helper_vmexit(env, type, param);
}
}
@@ -532,15 +568,17 @@ void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
uint32_t next_eip_addend)
{
+ CPUState *cs = ENV_GET_CPU(env);
if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
/* FIXME: this should be read in at vmrun (faster this way?) */
- uint64_t addr = ldq_phys(env->vm_vmcb +
+ uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb, control.iopm_base_pa));
uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
- if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
+ if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
/* next env->eip */
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
env->eip + next_eip_addend);
helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
}
@@ -556,16 +594,18 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
PRIx64 ", " TARGET_FMT_lx ")!\n",
exit_code, exit_info_1,
- ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
control.exit_info_2)),
env->eip);
if (env->hflags & HF_INHIBIT_IRQ_MASK) {
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
+ stl_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_state),
SVM_INTERRUPT_SHADOW_MASK);
env->hflags &= ~HF_INHIBIT_IRQ_MASK;
} else {
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
+ stl_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
}
/* Save the VM state in the vmcb */
@@ -578,39 +618,50 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
&env->segs[R_DS]);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
+ stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
env->gdt.base);
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
+ stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
env->gdt.limit);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
+ stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
env->idt.base);
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
+ stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
env->idt.limit);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
-
- int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
+
+ int_ctl = ldl_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
int_ctl |= env->v_tpr & V_TPR_MASK;
if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
int_ctl |= V_IRQ_MASK;
}
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
+ stl_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
+ stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
cpu_compute_eflags(env));
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
+ stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
env->eip);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
- stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
+ stq_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
+ stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
env->hflags & HF_CPL_MASK);
/* Reload the host state from vm_hsave */
@@ -621,29 +672,33 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
env->tsc_offset = 0;
- env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
save.gdtr.base));
- env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
+ env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
save.gdtr.limit));
- env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
save.idtr.base));
- env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
+ env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
save.idtr.limit));
- cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ cpu_x86_update_cr0(env, ldq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb,
save.cr0)) |
CR0_PE_MASK);
- cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ cpu_x86_update_cr4(env, ldq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb,
save.cr4)));
- cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ cpu_x86_update_cr3(env, ldq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb,
save.cr3)));
/* we need to set the efer after the crs so the hidden flags get
set properly */
- cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
save.efer)));
env->eflags = 0;
- cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ cpu_load_eflags(env, ldq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb,
save.rflags)),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
CC_OP = CC_OP_EFLAGS;
@@ -657,29 +712,35 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
R_DS);
- env->eip = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
- env->regs[R_ESP] = ldq_phys(env->vm_hsave +
+ env->eip = ldq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.rip));
+ env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
offsetof(struct vmcb, save.rsp));
- env->regs[R_EAX] = ldq_phys(env->vm_hsave +
+ env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
offsetof(struct vmcb, save.rax));
- env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
- env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
+ env->dr[6] = ldq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.dr6));
+ env->dr[7] = ldq_phys(cs->as,
+ env->vm_hsave + offsetof(struct vmcb, save.dr7));
/* other setups */
cpu_x86_set_cpl(env, 0);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
+ stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
exit_code);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
+ stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
exit_info_1);
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
- ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ stl_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
+ ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj)));
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
- ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ stl_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
+ ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj_err)));
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
+ stl_phys(cs->as,
+ env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
env->hflags2 &= ~HF2_GIF_MASK;
/* FIXME: Resets the current ASID register to zero (host ASID). */
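Taken together, the svm_helper.c hunks above apply one mechanical transformation: every ld*_phys()/st*_phys() call gains a leading AddressSpace argument, obtained from the CPUState that owns the CPUX86State. A minimal sketch of the pattern, assuming a hypothetical read_vmcb_u64() wrapper (the wrapper is illustrative, not part of this commit):

    /* Illustrative only: a vmcb field read before and after this series.
     * ENV_GET_CPU() recovers the owning CPUState; cs->as is that CPU's
     * address space. */
    static uint64_t read_vmcb_u64(CPUX86State *env, size_t field_offset)
    {
        CPUState *cs = ENV_GET_CPU(env);

        /* old: return ldq_phys(env->vm_vmcb + field_offset); */
        return ldq_phys(cs->as, env->vm_vmcb + field_offset);
    }

Called as, e.g., read_vmcb_u64(env, offsetof(struct vmcb, save.rip)).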
diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c
index 26c57d9a34..d541929743 100644
--- a/target-ppc/excp_helper.c
+++ b/target-ppc/excp_helper.c
@@ -180,12 +180,14 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
msr |= env->error_code;
goto store_next;
case POWERPC_EXCP_EXTERNAL: /* External input */
+ cs = CPU(cpu);
+
if (lpes0 == 1) {
new_msr |= (target_ulong)MSR_HVB;
}
if (env->mpic_proxy) {
/* IACK the IRQ on delivery */
- env->spr[SPR_BOOKE_EPR] = ldl_phys(env->mpic_iack);
+ env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
}
goto store_next;
case POWERPC_EXCP_ALIGN: /* Alignment exception */
diff --git a/target-ppc/mmu-hash32.h b/target-ppc/mmu-hash32.h
index 884786b97a..4671141a32 100644
--- a/target-ppc/mmu-hash32.h
+++ b/target-ppc/mmu-hash32.h
@@ -68,29 +68,33 @@ int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
static inline target_ulong ppc_hash32_load_hpte0(CPUPPCState *env,
hwaddr pte_offset)
{
+ CPUState *cs = ENV_GET_CPU(env);
assert(!env->external_htab); /* Not supported on 32-bit for now */
- return ldl_phys(env->htab_base + pte_offset);
+ return ldl_phys(cs->as, env->htab_base + pte_offset);
}
static inline target_ulong ppc_hash32_load_hpte1(CPUPPCState *env,
hwaddr pte_offset)
{
+ CPUState *cs = ENV_GET_CPU(env);
assert(!env->external_htab); /* Not supported on 32-bit for now */
- return ldl_phys(env->htab_base + pte_offset + HASH_PTE_SIZE_32/2);
+ return ldl_phys(cs->as, env->htab_base + pte_offset + HASH_PTE_SIZE_32/2);
}
static inline void ppc_hash32_store_hpte0(CPUPPCState *env,
hwaddr pte_offset, target_ulong pte0)
{
+ CPUState *cs = ENV_GET_CPU(env);
assert(!env->external_htab); /* Not supported on 32-bit for now */
- stl_phys(env->htab_base + pte_offset, pte0);
+ stl_phys(cs->as, env->htab_base + pte_offset, pte0);
}
static inline void ppc_hash32_store_hpte1(CPUPPCState *env,
hwaddr pte_offset, target_ulong pte1)
{
+ CPUState *cs = ENV_GET_CPU(env);
assert(!env->external_htab); /* Not supported on 32-bit for now */
- stl_phys(env->htab_base + pte_offset + HASH_PTE_SIZE_32/2, pte1);
+ stl_phys(cs->as, env->htab_base + pte_offset + HASH_PTE_SIZE_32/2, pte1);
}
typedef struct {
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index 55f5a230fd..a8da558ca2 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -78,40 +78,46 @@ int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
static inline target_ulong ppc_hash64_load_hpte0(CPUPPCState *env,
hwaddr pte_offset)
{
+ CPUState *cs = ENV_GET_CPU(env);
if (env->external_htab) {
return ldq_p(env->external_htab + pte_offset);
} else {
- return ldq_phys(env->htab_base + pte_offset);
+ return ldq_phys(cs->as, env->htab_base + pte_offset);
}
}
static inline target_ulong ppc_hash64_load_hpte1(CPUPPCState *env,
hwaddr pte_offset)
{
+ CPUState *cs = ENV_GET_CPU(env);
if (env->external_htab) {
return ldq_p(env->external_htab + pte_offset + HASH_PTE_SIZE_64/2);
} else {
- return ldq_phys(env->htab_base + pte_offset + HASH_PTE_SIZE_64/2);
+ return ldq_phys(cs->as,
+ env->htab_base + pte_offset + HASH_PTE_SIZE_64/2);
}
}
static inline void ppc_hash64_store_hpte0(CPUPPCState *env,
hwaddr pte_offset, target_ulong pte0)
{
+ CPUState *cs = ENV_GET_CPU(env);
if (env->external_htab) {
stq_p(env->external_htab + pte_offset, pte0);
} else {
- stq_phys(env->htab_base + pte_offset, pte0);
+ stq_phys(cs->as, env->htab_base + pte_offset, pte0);
}
}
static inline void ppc_hash64_store_hpte1(CPUPPCState *env,
hwaddr pte_offset, target_ulong pte1)
{
+ CPUState *cs = ENV_GET_CPU(env);
if (env->external_htab) {
stq_p(env->external_htab + pte_offset + HASH_PTE_SIZE_64/2, pte1);
} else {
- stq_phys(env->htab_base + pte_offset + HASH_PTE_SIZE_64/2, pte1);
+ stq_phys(cs->as,
+ env->htab_base + pte_offset + HASH_PTE_SIZE_64/2, pte1);
}
}
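The mmu-hash32.h and mmu-hash64.h hunks above show the same conversion in header-inline helpers: rather than widening their signatures, each helper derives the CPUState locally with ENV_GET_CPU(). A condensed sketch of the 64-bit case (load_hpte is an illustrative name; the real helpers are split into hpte0/hpte1 variants):

    static inline target_ulong load_hpte(CPUPPCState *env, hwaddr off)
    {
        CPUState *cs = ENV_GET_CPU(env);

        if (env->external_htab) {
            /* Hash table kept in host memory: a plain pointer load. */
            return ldq_p(env->external_htab + off);
        }
        /* Hash table in guest physical memory: load through cs->as. */
        return ldq_phys(cs->as, env->htab_base + off);
    }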
diff --git a/target-s390x/cpu.c b/target-s390x/cpu.c
index 3c89f8a767..ff57b806e4 100644
--- a/target-s390x/cpu.c
+++ b/target-s390x/cpu.c
@@ -70,7 +70,7 @@ static void s390_cpu_set_pc(CPUState *cs, vaddr value)
static void s390_cpu_load_normal(CPUState *s)
{
S390CPU *cpu = S390_CPU(s);
- cpu->env.psw.addr = ldl_phys(4) & PSW_MASK_ESA_ADDR;
+ cpu->env.psw.addr = ldl_phys(s->as, 4) & PSW_MASK_ESA_ADDR;
cpu->env.psw.mask = PSW_MASK_32 | PSW_MASK_64;
s390_add_running_cpu(cpu);
}
diff --git a/target-s390x/helper.c b/target-s390x/helper.c
index da33b38009..aa537e1bff 100644
--- a/target-s390x/helper.c
+++ b/target-s390x/helper.c
@@ -138,18 +138,21 @@ static int trans_bits(CPUS390XState *env, uint64_t mode)
static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
uint64_t mode)
{
+ CPUState *cs = ENV_GET_CPU(env);
int ilen = ILEN_LATER_INC;
int bits = trans_bits(env, mode) | 4;
DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
- stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
+ stq_phys(cs->as,
+ env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
trigger_pgm_exception(env, PGM_PROTECTION, ilen);
}
static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
uint32_t type, uint64_t asc, int rw)
{
+ CPUState *cs = ENV_GET_CPU(env);
int ilen = ILEN_LATER;
int bits = trans_bits(env, asc);
@@ -160,7 +163,8 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
- stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
+ stq_phys(cs->as,
+ env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
trigger_pgm_exception(env, type, ilen);
}
@@ -168,6 +172,7 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t asce, int level,
target_ulong *raddr, int *flags, int rw)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint64_t offs = 0;
uint64_t origin;
uint64_t new_asce;
@@ -218,7 +223,7 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
/* XXX region protection flags */
/* *flags &= ~PAGE_WRITE */
- new_asce = ldq_phys(origin + offs);
+ new_asce = ldq_phys(cs->as, origin + offs);
PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
__func__, origin, offs, new_asce);
diff --git a/target-s390x/mem_helper.c b/target-s390x/mem_helper.c
index 1422ae97a8..875ea95de4 100644
--- a/target-s390x/mem_helper.c
+++ b/target-s390x/mem_helper.c
@@ -955,6 +955,7 @@ uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
uint64_t mode1, uint64_t a2, uint64_t mode2)
{
+ CPUState *cs = ENV_GET_CPU(env);
target_ulong src, dest;
int flags, cc = 0, i;
@@ -984,7 +985,7 @@ static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
mvc_asc(env, l - i, a1 + i, mode1, a2 + i, mode2);
break;
}
- stb_phys(dest + i, ldub_phys(src + i));
+ stb_phys(cs->as, dest + i, ldub_phys(cs->as, src + i));
}
return cc;
@@ -1009,6 +1010,7 @@ uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint64_t page = vaddr & TARGET_PAGE_MASK;
uint64_t pte = 0;
@@ -1018,7 +1020,7 @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
According to spec we'd have to find it out ourselves */
/* XXX Linux is fine with overwriting the pte, the spec requires
us to only set the invalid bit */
- stq_phys(pte_addr, pte | _PAGE_INVALID);
+ stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);
/* XXX we exploit the fact that Linux passes the exact virtual
address here - it's not obliged to! */
@@ -1041,7 +1043,8 @@ void HELPER(ptlb)(CPUS390XState *env)
/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
- stw_phys(get_address(env, 0, 0, addr), (uint32_t)v1);
+ CPUState *cs = ENV_GET_CPU(env);
+ stw_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);
}
/* load real address */
diff --git a/target-sparc/ldst_helper.c b/target-sparc/ldst_helper.c
index 2936b58b31..92761ad17b 100644
--- a/target-sparc/ldst_helper.c
+++ b/target-sparc/ldst_helper.c
@@ -447,6 +447,7 @@ static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
int sign)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint64_t ret = 0;
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
uint32_t last_addr = addr;
@@ -608,37 +609,37 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
case 0x1c: /* LEON MMU passthrough */
switch (size) {
case 1:
- ret = ldub_phys(addr);
+ ret = ldub_phys(cs->as, addr);
break;
case 2:
- ret = lduw_phys(addr);
+ ret = lduw_phys(cs->as, addr);
break;
default:
case 4:
- ret = ldl_phys(addr);
+ ret = ldl_phys(cs->as, addr);
break;
case 8:
- ret = ldq_phys(addr);
+ ret = ldq_phys(cs->as, addr);
break;
}
break;
case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
switch (size) {
case 1:
- ret = ldub_phys((hwaddr)addr
+ ret = ldub_phys(cs->as, (hwaddr)addr
| ((hwaddr)(asi & 0xf) << 32));
break;
case 2:
- ret = lduw_phys((hwaddr)addr
+ ret = lduw_phys(cs->as, (hwaddr)addr
| ((hwaddr)(asi & 0xf) << 32));
break;
default:
case 4:
- ret = ldl_phys((hwaddr)addr
+ ret = ldl_phys(cs->as, (hwaddr)addr
| ((hwaddr)(asi & 0xf) << 32));
break;
case 8:
- ret = ldq_phys((hwaddr)addr
+ ret = ldq_phys(cs->as, (hwaddr)addr
| ((hwaddr)(asi & 0xf) << 32));
break;
}
@@ -715,6 +716,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
int size)
{
+ CPUState *cs = ENV_GET_CPU(env);
helper_check_align(env, addr, size - 1);
switch (asi) {
case 2: /* SuperSparc MXCC registers and Leon3 cache control */
@@ -771,13 +773,17 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
"%08x: unimplemented access size: %d\n", addr,
size);
}
- env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+ env->mxccdata[0] = ldq_phys(cs->as,
+ (env->mxccregs[0] & 0xffffffffULL) +
0);
- env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+ env->mxccdata[1] = ldq_phys(cs->as,
+ (env->mxccregs[0] & 0xffffffffULL) +
8);
- env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+ env->mxccdata[2] = ldq_phys(cs->as,
+ (env->mxccregs[0] & 0xffffffffULL) +
16);
- env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+ env->mxccdata[3] = ldq_phys(cs->as,
+ (env->mxccregs[0] & 0xffffffffULL) +
24);
break;
case 0x01c00200: /* MXCC stream destination */
@@ -788,13 +794,13 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
"%08x: unimplemented access size: %d\n", addr,
size);
}
- stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
+ stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 0,
env->mxccdata[0]);
- stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
+ stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 8,
env->mxccdata[1]);
- stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
+ stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 16,
env->mxccdata[2]);
- stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
+ stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 24,
env->mxccdata[3]);
break;
case 0x01c00a00: /* MXCC control register */
@@ -1006,17 +1012,17 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
{
switch (size) {
case 1:
- stb_phys(addr, val);
+ stb_phys(cs->as, addr, val);
break;
case 2:
- stw_phys(addr, val);
+ stw_phys(cs->as, addr, val);
break;
case 4:
default:
- stl_phys(addr, val);
+ stl_phys(cs->as, addr, val);
break;
case 8:
- stq_phys(addr, val);
+ stq_phys(cs->as, addr, val);
break;
}
}
@@ -1025,20 +1031,20 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
{
switch (size) {
case 1:
- stb_phys((hwaddr)addr
+ stb_phys(cs->as, (hwaddr)addr
| ((hwaddr)(asi & 0xf) << 32), val);
break;
case 2:
- stw_phys((hwaddr)addr
+ stw_phys(cs->as, (hwaddr)addr
| ((hwaddr)(asi & 0xf) << 32), val);
break;
case 4:
default:
- stl_phys((hwaddr)addr
+ stl_phys(cs->as, (hwaddr)addr
| ((hwaddr)(asi & 0xf) << 32), val);
break;
case 8:
- stq_phys((hwaddr)addr
+ stq_phys(cs->as, (hwaddr)addr
| ((hwaddr)(asi & 0xf) << 32), val);
break;
}
@@ -1284,6 +1290,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
int sign)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint64_t ret = 0;
#if defined(DEBUG_ASI)
target_ulong last_addr = addr;
@@ -1432,17 +1439,17 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
{
switch (size) {
case 1:
- ret = ldub_phys(addr);
+ ret = ldub_phys(cs->as, addr);
break;
case 2:
- ret = lduw_phys(addr);
+ ret = lduw_phys(cs->as, addr);
break;
case 4:
- ret = ldl_phys(addr);
+ ret = ldl_phys(cs->as, addr);
break;
default:
case 8:
- ret = ldq_phys(addr);
+ ret = ldq_phys(cs->as, addr);
break;
}
break;
@@ -1653,6 +1660,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
int asi, int size)
{
+ CPUState *cs = ENV_GET_CPU(env);
#ifdef DEBUG_ASI
dump_asi("write", addr, asi, size, val);
#endif
@@ -1803,17 +1811,17 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
{
switch (size) {
case 1:
- stb_phys(addr, val);
+ stb_phys(cs->as, addr, val);
break;
case 2:
- stw_phys(addr, val);
+ stw_phys(cs->as, addr, val);
break;
case 4:
- stl_phys(addr, val);
+ stl_phys(cs->as, addr, val);
break;
case 8:
default:
- stq_phys(addr, val);
+ stq_phys(cs->as, addr, val);
break;
}
}
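In the ldst_helper.c hunks above, the MMU passthrough ASIs 0x21-0x2f address a 36-bit physical space: the low four ASI bits supply physical-address bits 35:32. A hedged sketch of that composition:

    /* Illustrative: build the 36-bit physical address used by the
     * passthrough ASIs; asi & 0xf becomes PA bits [35:32]. */
    static hwaddr passthrough_pa(target_ulong addr, int asi)
    {
        return (hwaddr)addr | ((hwaddr)(asi & 0xf) << 32);
    }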
diff --git a/target-sparc/mmu_helper.c b/target-sparc/mmu_helper.c
index ef12a0a8d0..5fc2fd64bb 100644
--- a/target-sparc/mmu_helper.c
+++ b/target-sparc/mmu_helper.c
@@ -86,6 +86,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
uint32_t pde;
int error_code = 0, is_dirty, is_user;
unsigned long page_offset;
+ CPUState *cs = ENV_GET_CPU(env);
is_user = mmu_idx == MMU_USER_IDX;
@@ -108,7 +109,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
/* SPARC reference MMU table walk: Context table->L1->L2->PTE */
/* Context base + context number */
pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
- pde = ldl_phys(pde_ptr);
+ pde = ldl_phys(cs->as, pde_ptr);
/* Ctx pde */
switch (pde & PTE_ENTRYTYPE_MASK) {
@@ -120,7 +121,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
return 4 << 2;
case 1: /* L0 PDE */
pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
- pde = ldl_phys(pde_ptr);
+ pde = ldl_phys(cs->as, pde_ptr);
switch (pde & PTE_ENTRYTYPE_MASK) {
default:
@@ -130,7 +131,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
return (1 << 8) | (4 << 2);
case 1: /* L1 PDE */
pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
- pde = ldl_phys(pde_ptr);
+ pde = ldl_phys(cs->as, pde_ptr);
switch (pde & PTE_ENTRYTYPE_MASK) {
default:
@@ -140,7 +141,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
return (2 << 8) | (4 << 2);
case 1: /* L2 PDE */
pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
- pde = ldl_phys(pde_ptr);
+ pde = ldl_phys(cs->as, pde_ptr);
switch (pde & PTE_ENTRYTYPE_MASK) {
default:
@@ -179,7 +180,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
if (is_dirty) {
pde |= PG_MODIFIED_MASK;
}
- stl_phys_notdirty(pde_ptr, pde);
+ stl_phys_notdirty(cs->as, pde_ptr, pde);
}
/* the page can be put in the TLB */
@@ -244,13 +245,14 @@ int cpu_sparc_handle_mmu_fault(CPUSPARCState *env, target_ulong address, int rw,
target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
+ CPUState *cs = ENV_GET_CPU(env);
hwaddr pde_ptr;
uint32_t pde;
/* Context base + context number */
pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
(env->mmuregs[2] << 2);
- pde = ldl_phys(pde_ptr);
+ pde = ldl_phys(cs->as, pde_ptr);
switch (pde & PTE_ENTRYTYPE_MASK) {
default:
@@ -263,7 +265,7 @@ target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
return pde;
}
pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
- pde = ldl_phys(pde_ptr);
+ pde = ldl_phys(cs->as, pde_ptr);
switch (pde & PTE_ENTRYTYPE_MASK) {
default:
@@ -277,7 +279,7 @@ target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
return pde;
}
pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
- pde = ldl_phys(pde_ptr);
+ pde = ldl_phys(cs->as, pde_ptr);
switch (pde & PTE_ENTRYTYPE_MASK) {
default:
@@ -291,7 +293,7 @@ target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
return pde;
}
pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
- pde = ldl_phys(pde_ptr);
+ pde = ldl_phys(cs->as, pde_ptr);
switch (pde & PTE_ENTRYTYPE_MASK) {
default:
@@ -317,7 +319,7 @@ void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
uint32_t pde;
pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
- pde = ldl_phys(pde_ptr);
+ pde = ldl_phys(cs->as, pde_ptr);
(*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
(hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
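The mmu_helper.c hunks above convert the SPARC reference-MMU walk, in which every level (context table, L0, L1, L2) is a 32-bit descriptor fetched from guest physical memory. A condensed sketch of the descriptor fetches as converted, with the PTE_ENTRYTYPE_MASK error handling elided:

    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);  /* ctx */
    pde = ldl_phys(cs->as, pde_ptr);
    pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);       /* L0 */
    pde = ldl_phys(cs->as, pde_ptr);
    pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); /* L1 */
    pde = ldl_phys(cs->as, pde_ptr);
    pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);  /* L2 */
    pde = ldl_phys(cs->as, pde_ptr);                            /* PTE */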
diff --git a/target-unicore32/softmmu.c b/target-unicore32/softmmu.c
index 1e13a85d05..22defc6db9 100644
--- a/target-unicore32/softmmu.c
+++ b/target-unicore32/softmmu.c
@@ -121,6 +121,7 @@ static int get_phys_addr_ucv2(CPUUniCore32State *env, uint32_t address,
int access_type, int is_user, uint32_t *phys_ptr, int *prot,
target_ulong *page_size)
{
+ CPUState *cs = ENV_GET_CPU(env);
int code;
uint32_t table;
uint32_t desc;
@@ -130,7 +131,7 @@ static int get_phys_addr_ucv2(CPUUniCore32State *env, uint32_t address,
/* Lookup l1 descriptor. */
table = env->cp0.c2_base & 0xfffff000;
table |= (address >> 20) & 0xffc;
- desc = ldl_phys(table);
+ desc = ldl_phys(cs->as, table);
code = 0;
switch (PAGETABLE_TYPE(desc)) {
case 3:
@@ -152,7 +153,7 @@ static int get_phys_addr_ucv2(CPUUniCore32State *env, uint32_t address,
goto do_fault;
}
table = (desc & 0xfffff000) | ((address >> 10) & 0xffc);
- desc = ldl_phys(table);
+ desc = ldl_phys(cs->as, table);
/* 4k page. */
if (is_user) {
DPRINTF("PTE address %x, desc %x\n", table, desc);
diff --git a/target-xtensa/helper.c b/target-xtensa/helper.c
index a0f9993b2d..60cb055a93 100644
--- a/target-xtensa/helper.c
+++ b/target-xtensa/helper.c
@@ -552,6 +552,7 @@ static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint32_t paddr;
uint32_t page_size;
unsigned access;
@@ -564,7 +565,7 @@ static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
vaddr, ret ? ~0 : paddr);
if (ret == 0) {
- *pte = ldl_phys(paddr);
+ *pte = ldl_phys(cs->as, paddr);
}
return ret;
}
diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
index cf970257db..89a72b5678 100644
--- a/target-xtensa/op_helper.c
+++ b/target-xtensa/op_helper.c
@@ -29,6 +29,7 @@
#include "helper.h"
#include "qemu/host-utils.h"
#include "exec/softmmu_exec.h"
+#include "exec/address-spaces.h"
static void do_unaligned_access(CPUXtensaState *env,
target_ulong addr, int is_write, int is_user, uintptr_t retaddr);
@@ -90,7 +91,7 @@ static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
&paddr, &page_size, &access);
if (ret == 0) {
- tb_invalidate_phys_addr(paddr);
+ tb_invalidate_phys_addr(&address_space_memory, paddr);
}
}
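The op_helper.c hunk above must name an address space explicitly because tb_invalidate_phys_addr() grows an AddressSpace parameter (see the translate-all.c hunk below); the new "exec/address-spaces.h" include provides address_space_memory. A minimal caller sketch, assuming system-mode emulation:

    /* Illustrative: drop any translated block covering a guest-physical
     * address in the machine-wide memory address space. */
    static void flush_tb_at(hwaddr paddr)
    {
        tb_invalidate_phys_addr(&address_space_memory, paddr);
    }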
diff --git a/tcg/tcg.h b/tcg/tcg.h
index c72af6cfb7..f7efcb4202 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -324,13 +324,16 @@ typedef int TCGv_i64;
#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
+#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b))
/* Dummy definition to avoid compiler warnings. */
#define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1)
#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1)
+#define TCGV_UNUSED_PTR(x) x = MAKE_TCGV_PTR(-1)
#define TCGV_IS_UNUSED_I32(x) (GET_TCGV_I32(x) == -1)
#define TCGV_IS_UNUSED_I64(x) (GET_TCGV_I64(x) == -1)
+#define TCGV_IS_UNUSED_PTR(x) (GET_TCGV_PTR(x) == -1)
/* call flags */
/* Helper does not read globals (either directly or through an exception). It
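The tcg.h hunk above completes the TCGv_ptr API so pointer temporaries can be marked unused and tested, like the existing _i32/_i64 variants (the AArch64 translator added in this series is the intended user). A sketch of the usage pattern inside a translator function, where need_fpstatus and alloc_fpstatus_ptr() are illustrative:

    TCGv_ptr fpst;

    TCGV_UNUSED_PTR(fpst);               /* no pointer temp allocated yet */
    if (need_fpstatus) {
        fpst = alloc_fpstatus_ptr();     /* hypothetical allocator */
    }
    /* ... emit ops that may or may not use fpst ... */
    if (!TCGV_IS_UNUSED_PTR(fpst)) {
        tcg_temp_free_ptr(fpst);         /* free only if one was allocated */
    }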
diff --git a/translate-all.c b/translate-all.c
index 543e1ffe77..1ac0246dab 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -1357,13 +1357,13 @@ static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
}
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(hwaddr addr)
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
ram_addr_t ram_addr;
MemoryRegion *mr;
hwaddr l = 1;
- mr = address_space_translate(&address_space_memory, addr, &addr, &l, false);
+ mr = address_space_translate(as, addr, &addr, &l, false);
if (!(memory_region_is_ram(mr)
|| memory_region_is_romd(mr))) {
return;
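The util/fifo8.c hunks below round out the Fifo8 API with batch push/pop and occupancy queries. A hedged usage sketch for a device receive path (everything except the fifo8_* calls is illustrative):

    Fifo8 rx;
    fifo8_create(&rx, 512);

    /* Producer: fifo8_push_all() aborts on overflow, so check free
     * space first. */
    if (fifo8_num_free(&rx) >= frame_len) {
        fifo8_push_all(&rx, frame, frame_len);
    }

    /* Consumer: fifo8_pop_buf() returns a pointer into the ring and may
     * return fewer than 'max' bytes when the data wraps, so loop. */
    while (fifo8_num_used(&rx) > 0) {
        uint32_t n;
        const uint8_t *buf = fifo8_pop_buf(&rx, fifo8_num_used(&rx), &n);
        handle_rx_bytes(buf, n);         /* hypothetical consumer */
    }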
diff --git a/util/fifo8.c b/util/fifo8.c
index 013e903c6e..6a43482c9e 100644
--- a/util/fifo8.c
+++ b/util/fifo8.c
@@ -37,6 +37,27 @@ void fifo8_push(Fifo8 *fifo, uint8_t data)
fifo->num++;
}
+void fifo8_push_all(Fifo8 *fifo, const uint8_t *data, uint32_t num)
+{
+ uint32_t start, avail;
+
+ if (fifo->num + num > fifo->capacity) {
+ abort();
+ }
+
+ start = (fifo->head + fifo->num) % fifo->capacity;
+
+ if (start + num <= fifo->capacity) {
+ memcpy(&fifo->data[start], data, num);
+ } else {
+ avail = fifo->capacity - start;
+ memcpy(&fifo->data[start], data, avail);
+ memcpy(&fifo->data[0], &data[avail], num - avail);
+ }
+
+ fifo->num += num;
+}
+
uint8_t fifo8_pop(Fifo8 *fifo)
{
uint8_t ret;
@@ -50,9 +71,25 @@ uint8_t fifo8_pop(Fifo8 *fifo)
return ret;
}
+const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num)
+{
+ uint8_t *ret;
+
+ if (max == 0 || max > fifo->num) {
+ abort();
+ }
+ *num = MIN(fifo->capacity - fifo->head, max);
+ ret = &fifo->data[fifo->head];
+ fifo->head += *num;
+ fifo->head %= fifo->capacity;
+ fifo->num -= *num;
+ return ret;
+}
+
void fifo8_reset(Fifo8 *fifo)
{
fifo->num = 0;
+ fifo->head = 0;
}
bool fifo8_is_empty(Fifo8 *fifo)
@@ -65,6 +102,16 @@ bool fifo8_is_full(Fifo8 *fifo)
return (fifo->num == fifo->capacity);
}
+uint32_t fifo8_num_free(Fifo8 *fifo)
+{
+ return fifo->capacity - fifo->num;
+}
+
+uint32_t fifo8_num_used(Fifo8 *fifo)
+{
+ return fifo->num;
+}
+
const VMStateDescription vmstate_fifo8 = {
.name = "Fifo8",
.version_id = 1,